/* gcc/config/rs6000/rs6000.c */
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2016 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable; we call it to get
   the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;
/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 }
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or in legitimate
   address checking.  We only need to worry about GPR, FPR, and AltiVec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
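/* Editor's sketch, for illustration only: a mode/register-class pair that
   is valid, allows both reg+reg and reg+offset addressing, and supports
   PRE_INC/PRE_DEC would carry the mask
     (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
      | RELOAD_REG_PRE_INCDEC)
   which is 0x01 | 0x04 | 0x08 | 0x10 = 0x1d.  */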
/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
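/* Editor's note, for illustration only: "quad offset" above means the
   displacement must be a multiple of 16, so for a mode where this helper
   returns true an offset of 48 is addressable with a single d-form access
   while an offset of 50 is not.  */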
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
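/* Editor's note, for illustration only: the macros above implement the
   usual X-macro pattern.  When rs6000-builtin.def is included below, every
     RS6000_BUILTIN_n (ENUM, NAME, MASK, ATTR, ICODE)
   entry in that file expands to the initializer { NAME, ICODE, MASK, ATTR },
   producing one rs6000_builtin_info_type record per builtin; the same .def
   file is re-included elsewhere with different macro definitions to build
   the enum and the decl table.  */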
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);
rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;
static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
   "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
   "%rh8",  "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
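/* Editor's sketch, for illustration only: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)
   evaluates to 0x80000000 (the bit for %v0), and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) to 0x80000000 >> 2 = 0x20000000,
   matching the VRSAVE convention described above.  */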
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT		\
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1682 #undef TARGET_FUNCTION_ARG
1683 #define TARGET_FUNCTION_ARG rs6000_function_arg
1684 #undef TARGET_FUNCTION_ARG_BOUNDARY
1685 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1687 #undef TARGET_BUILD_BUILTIN_VA_LIST
1688 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1690 #undef TARGET_EXPAND_BUILTIN_VA_START
1691 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1693 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1694 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1696 #undef TARGET_EH_RETURN_FILTER_MODE
1697 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1699 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1700 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1702 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1703 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1705 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1706 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1708 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1709 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1711 #undef TARGET_MD_ASM_ADJUST
1712 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1714 #undef TARGET_OPTION_OVERRIDE
1715 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1717 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1718 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1719 rs6000_builtin_vectorized_function
1721 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1722 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1723 rs6000_builtin_md_vectorized_function
1725 #if !TARGET_MACHO
1726 #undef TARGET_STACK_PROTECT_FAIL
1727 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1728 #endif
1730 #ifdef HAVE_AS_TLS
1731 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1732 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1733 #endif
1735 /* Use a 32-bit anchor range. This leads to sequences like:
1737 addis tmp,anchor,high
1738 add dest,tmp,low
1740 where tmp itself acts as an anchor, and can be shared between
1741 accesses to the same 64k page. */
1742 #undef TARGET_MIN_ANCHOR_OFFSET
1743 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1744 #undef TARGET_MAX_ANCHOR_OFFSET
1745 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
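/* Illustrative sketch (not part of the build) of what the anchor range
   buys us: with -fsection-anchors, two globals that land inside the same
   anchor range can share a single addis of the anchor's high part.  The
   names below are assumptions made up for the example.  */
#if 0
static int shared_a, shared_b;

int
anchor_example (void)
{
  /* Expected shape: one addis for the anchor, two D-form loads.  */
  return shared_a + shared_b;
}
#endif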
1746 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1747 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1748 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1749 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1751 #undef TARGET_BUILTIN_RECIPROCAL
1752 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1754 #undef TARGET_EXPAND_TO_RTL_HOOK
1755 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1757 #undef TARGET_INSTANTIATE_DECLS
1758 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1760 #undef TARGET_SECONDARY_RELOAD
1761 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1763 #undef TARGET_LEGITIMATE_ADDRESS_P
1764 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1766 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1767 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1769 #undef TARGET_LRA_P
1770 #define TARGET_LRA_P rs6000_lra_p
1772 #undef TARGET_CAN_ELIMINATE
1773 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1775 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1776 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1778 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1779 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1781 #undef TARGET_TRAMPOLINE_INIT
1782 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1784 #undef TARGET_FUNCTION_VALUE
1785 #define TARGET_FUNCTION_VALUE rs6000_function_value
1787 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1788 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1790 #undef TARGET_OPTION_SAVE
1791 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1793 #undef TARGET_OPTION_RESTORE
1794 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1796 #undef TARGET_OPTION_PRINT
1797 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1799 #undef TARGET_CAN_INLINE_P
1800 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1802 #undef TARGET_SET_CURRENT_FUNCTION
1803 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1805 #undef TARGET_LEGITIMATE_CONSTANT_P
1806 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1808 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1809 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1811 #undef TARGET_CAN_USE_DOLOOP_P
1812 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1814 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1815 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1817 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1818 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1819 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1820 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1821 #undef TARGET_UNWIND_WORD_MODE
1822 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1824 #undef TARGET_OFFLOAD_OPTIONS
1825 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1827 #undef TARGET_C_MODE_FOR_SUFFIX
1828 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1830 #undef TARGET_INVALID_BINARY_OP
1831 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1833 #undef TARGET_OPTAB_SUPPORTED_P
1834 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1837 /* Processor table. */
1838 struct rs6000_ptt
1840 const char *const name; /* Canonical processor name. */
1841 const enum processor_type processor; /* Processor type enum value. */
1842 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1845 static struct rs6000_ptt const processor_target_table[] =
1847 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1848 #include "rs6000-cpus.def"
1849 #undef RS6000_CPU
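/* Illustrative sketch of the X-macro expansion above (the entry shown is a
   hypothetical example, not a quote from rs6000-cpus.def): each RS6000_CPU
   line in the .def file becomes one table initializer.  */
#if 0
RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER)
/* expands to the processor_target_table row:
   { "power8", PROCESSOR_POWER8, MASK_POWERPC64 | ISA_2_7_MASKS_SERVER },  */
#endif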
1852 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1853 name is invalid. */
1855 static int
1856 rs6000_cpu_name_lookup (const char *name)
1858 size_t i;
1860 if (name != NULL)
1862 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1863 if (! strcmp (name, processor_target_table[i].name))
1864 return (int)i;
1867 return -1;
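/* Illustrative usage sketch (not compiled): validating a -mcpu= argument
   with the lookup above; the variable names are assumptions.  */
#if 0
  int cpu_index = rs6000_cpu_name_lookup (cpu_opt);
  if (cpu_index >= 0)
    flags |= processor_target_table[cpu_index].target_enable;
  else
    error ("bad value %qs for -mcpu", cpu_opt);	/* hypothetical diagnostic */
#endif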
1871 /* Return number of consecutive hard regs needed starting at reg REGNO
1872 to hold something of mode MODE.
1873 This is ordinarily the length in words of a value of mode MODE
1874 but can be less for certain modes in special long registers.
1876 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1877 scalar instructions. The upper 32 bits are only available to the
1878 SIMD instructions.
1880 POWER and PowerPC GPRs hold 32 bits worth;
1881 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1883 static int
1884 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1886 unsigned HOST_WIDE_INT reg_size;
1888 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1889 128-bit floating point that can go in vector registers, which has VSX
1890 memory addressing. */
1891 if (FP_REGNO_P (regno))
1892 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1893 ? UNITS_PER_VSX_WORD
1894 : UNITS_PER_FP_WORD);
1896 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1897 reg_size = UNITS_PER_SPE_WORD;
1899 else if (ALTIVEC_REGNO_P (regno))
1900 reg_size = UNITS_PER_ALTIVEC_WORD;
1902 /* The value returned for SCmode in the E500 double case is 2 for
1903 ABI compatibility; storing an SCmode value in a single register
1904 would require function_arg and rs6000_spe_function_arg to handle
1905 SCmode so as to pass the value correctly in a pair of
1906 registers. */
1907 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1908 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
1909 reg_size = UNITS_PER_FP_WORD;
1911 else
1912 reg_size = UNITS_PER_WORD;
1914 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
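/* Worked example of the ceiling division above (assumed sizes): a 16-byte
   vector needs (16 + 8 - 1) / 8 == 2 hard registers when reg_size is 8
   (e.g. VSX words), but (16 + 4 - 1) / 4 == 4 of them when reg_size is 4
   (32-bit GPRs).  */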
1917 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1918 MODE. */
1919 static int
1920 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
1922 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1924 if (COMPLEX_MODE_P (mode))
1925 mode = GET_MODE_INNER (mode);
1927 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1928 register combinations, and use PTImode where we need to deal with quad
1929 word memory operations. Don't allow quad words in the argument or frame
1930 pointer registers, just registers 0..31. */
1931 if (mode == PTImode)
1932 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1933 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1934 && ((regno & 1) == 0));
1936 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1937 implementations. Don't allow an item to be split between a FP register
1938 and an Altivec register. Allow TImode in all VSX registers if the user
1939 asked for it. */
1940 if (TARGET_VSX && VSX_REGNO_P (regno)
1941 && (VECTOR_MEM_VSX_P (mode)
1942 || FLOAT128_VECTOR_P (mode)
1943 || reg_addr[mode].scalar_in_vmx_p
1944 || (TARGET_VSX_TIMODE && mode == TImode)
1945 || (TARGET_VADDUQM && mode == V1TImode)
1946 || (TARGET_UPPER_REGS_DI && mode == DImode)))
1948 if (FP_REGNO_P (regno))
1949 return FP_REGNO_P (last_regno);
1951 if (ALTIVEC_REGNO_P (regno))
1953 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1954 return 0;
1956 return ALTIVEC_REGNO_P (last_regno);
1960 /* The GPRs can hold any mode, but values bigger than one register
1961 cannot go past R31. */
1962 if (INT_REGNO_P (regno))
1963 return INT_REGNO_P (last_regno);
1965 /* The float registers (except for VSX vector modes) can only hold floating
1966 modes and DImode. */
1967 if (FP_REGNO_P (regno))
1969 if (FLOAT128_VECTOR_P (mode))
1970 return false;
1972 if (SCALAR_FLOAT_MODE_P (mode)
1973 && (mode != TDmode || (regno % 2) == 0)
1974 && FP_REGNO_P (last_regno))
1975 return 1;
1977 if (GET_MODE_CLASS (mode) == MODE_INT
1978 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1979 return 1;
1981 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1982 && PAIRED_VECTOR_MODE (mode))
1983 return 1;
1985 return 0;
1988 /* The CR register can only hold CC modes. */
1989 if (CR_REGNO_P (regno))
1990 return GET_MODE_CLASS (mode) == MODE_CC;
1992 if (CA_REGNO_P (regno))
1993 return mode == Pmode || mode == SImode;
1996 /* AltiVec only in AltiVec registers. */
1996 if (ALTIVEC_REGNO_P (regno))
1997 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1998 || mode == V1TImode);
2000 /* ...but GPRs can hold SIMD data on the SPE in one register. */
2001 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2002 return 1;
2004 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2005 registers, and the value must fit within the register set. */
2007 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
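/* Illustrative sketch (not compiled): the PTImode case above only accepts
   an even GPR as the first register of the pair, so a quad word operation
   can use r10/r11 but cannot start at r9.  */
#if 0
  gcc_checking_assert (rs6000_hard_regno_mode_ok (10, PTImode));
  gcc_checking_assert (!rs6000_hard_regno_mode_ok (9, PTImode));
#endif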
2010 /* Print interesting facts about registers. */
2011 static void
2012 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2014 int r, m;
2016 for (r = first_regno; r <= last_regno; ++r)
2018 const char *comma = "";
2019 int len;
2021 if (first_regno == last_regno)
2022 fprintf (stderr, "%s:\t", reg_name);
2023 else
2024 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2026 len = 8;
2027 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2028 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2030 if (len > 70)
2032 fprintf (stderr, ",\n\t");
2033 len = 8;
2034 comma = "";
2037 if (rs6000_hard_regno_nregs[m][r] > 1)
2038 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2039 rs6000_hard_regno_nregs[m][r]);
2040 else
2041 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2043 comma = ", ";
2046 if (call_used_regs[r])
2048 if (len > 70)
2050 fprintf (stderr, ",\n\t");
2051 len = 8;
2052 comma = "";
2055 len += fprintf (stderr, "%s%s", comma, "call-used");
2056 comma = ", ";
2059 if (fixed_regs[r])
2061 if (len > 70)
2063 fprintf (stderr, ",\n\t");
2064 len = 8;
2065 comma = "";
2068 len += fprintf (stderr, "%s%s", comma, "fixed");
2069 comma = ", ";
2072 if (len > 70)
2074 fprintf (stderr, ",\n\t");
2075 comma = "";
2078 len += fprintf (stderr, "%sreg-class = %s", comma,
2079 reg_class_names[(int)rs6000_regno_regclass[r]]);
2080 comma = ", ";
2082 if (len > 70)
2084 fprintf (stderr, ",\n\t");
2085 comma = "";
2088 fprintf (stderr, "%sregno = %d\n", comma, r);
2092 static const char *
2093 rs6000_debug_vector_unit (enum rs6000_vector v)
2095 const char *ret;
2097 switch (v)
2099 case VECTOR_NONE: ret = "none"; break;
2100 case VECTOR_ALTIVEC: ret = "altivec"; break;
2101 case VECTOR_VSX: ret = "vsx"; break;
2102 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2103 case VECTOR_PAIRED: ret = "paired"; break;
2104 case VECTOR_SPE: ret = "spe"; break;
2105 case VECTOR_OTHER: ret = "other"; break;
2106 default: ret = "unknown"; break;
2109 return ret;
2112 /* Inner function printing just the address mask for a particular reload
2113 register class. */
2114 DEBUG_FUNCTION char *
2115 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2117 static char ret[8];
2118 char *p = ret;
2120 if ((mask & RELOAD_REG_VALID) != 0)
2121 *p++ = 'v';
2122 else if (keep_spaces)
2123 *p++ = ' ';
2125 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2126 *p++ = 'm';
2127 else if (keep_spaces)
2128 *p++ = ' ';
2130 if ((mask & RELOAD_REG_INDEXED) != 0)
2131 *p++ = 'i';
2132 else if (keep_spaces)
2133 *p++ = ' ';
2135 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2136 *p++ = 'O';
2137 else if ((mask & RELOAD_REG_OFFSET) != 0)
2138 *p++ = 'o';
2139 else if (keep_spaces)
2140 *p++ = ' ';
2142 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2143 *p++ = '+';
2144 else if (keep_spaces)
2145 *p++ = ' ';
2147 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2148 *p++ = '+';
2149 else if (keep_spaces)
2150 *p++ = ' ';
2152 if ((mask & RELOAD_REG_AND_M16) != 0)
2153 *p++ = '&';
2154 else if (keep_spaces)
2155 *p++ = ' ';
2157 *p = '\0';
2159 return ret;
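/* Illustrative example of the encoding above (assumed mask value): with
   keep_spaces, a mask of VALID | INDEXED | OFFSET | PRE_INCDEC prints as
   "v io+  " -- one column per flag, blanks for unset bits.  */
#if 0
  fprintf (stderr, "%s\n",
	   rs6000_debug_addr_mask (RELOAD_REG_VALID | RELOAD_REG_INDEXED
				   | RELOAD_REG_OFFSET
				   | RELOAD_REG_PRE_INCDEC, true));
#endif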
2162 /* Print the address masks in a human readable fashion. */
2163 DEBUG_FUNCTION void
2164 rs6000_debug_print_mode (ssize_t m)
2166 ssize_t rc;
2167 int spaces = 0;
2168 bool fuse_extra_p;
2170 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2171 for (rc = 0; rc < N_RELOAD_REG; rc++)
2172 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2173 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2175 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2176 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2177 fprintf (stderr, " Reload=%c%c",
2178 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2179 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2180 else
2181 spaces += sizeof (" Reload=sl") - 1;
2183 if (reg_addr[m].scalar_in_vmx_p)
2185 fprintf (stderr, "%*s Upper=y", spaces, "");
2186 spaces = 0;
2188 else
2189 spaces += sizeof (" Upper=y") - 1;
2191 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2192 || reg_addr[m].fused_toc);
2193 if (!fuse_extra_p)
2195 for (rc = 0; rc < N_RELOAD_REG; rc++)
2197 if (rc != RELOAD_REG_ANY)
2199 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2201 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2202 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2203 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2205 fuse_extra_p = true;
2206 break;
2212 if (fuse_extra_p)
2214 fprintf (stderr, "%*s Fuse:", spaces, "");
2215 spaces = 0;
2217 for (rc = 0; rc < N_RELOAD_REG; rc++)
2219 if (rc != RELOAD_REG_ANY)
2221 char load, store;
2223 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2224 load = 'l';
2225 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2226 load = 'L';
2227 else
2228 load = '-';
2230 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2231 store = 's';
2232 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2233 store = 'S';
2234 else
2235 store = '-';
2237 if (load == '-' && store == '-')
2238 spaces += 5;
2239 else
2241 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2242 reload_reg_map[rc].name[0], load, store);
2243 spaces = 0;
2248 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2250 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2251 spaces = 0;
2253 else
2254 spaces += sizeof (" P8gpr") - 1;
2256 if (reg_addr[m].fused_toc)
2258 fprintf (stderr, "%*sToc", (spaces + 1), "");
2259 spaces = 0;
2261 else
2262 spaces += sizeof (" Toc") - 1;
2264 else
2265 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2267 if (rs6000_vector_unit[m] != VECTOR_NONE
2268 || rs6000_vector_mem[m] != VECTOR_NONE)
2270 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2271 spaces, "",
2272 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2273 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2276 fputs ("\n", stderr);
2279 #define DEBUG_FMT_ID "%-32s= "
2280 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2281 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2282 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
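/* Illustrative use of the formats above: DEBUG_FMT_ID left-justifies the
   key in a 32-column field followed by "= ", so the printed values line
   up in one column, e.g. fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2")
   prints "abi" padded to 32 columns, then "= ELFv2".  */
#if 0
  fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2");
  fprintf (stderr, DEBUG_FMT_D, "tls_size", 16);
#endif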
2284 /* Print various interesting information with -mdebug=reg. */
2285 static void
2286 rs6000_debug_reg_global (void)
2288 static const char *const tf[2] = { "false", "true" };
2289 const char *nl = (const char *)0;
2290 int m;
2291 size_t m1, m2, v;
2292 char costly_num[20];
2293 char nop_num[20];
2294 char flags_buffer[40];
2295 const char *costly_str;
2296 const char *nop_str;
2297 const char *trace_str;
2298 const char *abi_str;
2299 const char *cmodel_str;
2300 struct cl_target_option cl_opts;
2302 /* Modes we want tieable information on. */
2303 static const machine_mode print_tieable_modes[] = {
2304 QImode,
2305 HImode,
2306 SImode,
2307 DImode,
2308 TImode,
2309 PTImode,
2310 SFmode,
2311 DFmode,
2312 TFmode,
2313 IFmode,
2314 KFmode,
2315 SDmode,
2316 DDmode,
2317 TDmode,
2318 V8QImode,
2319 V4HImode,
2320 V2SImode,
2321 V16QImode,
2322 V8HImode,
2323 V4SImode,
2324 V2DImode,
2325 V1TImode,
2326 V32QImode,
2327 V16HImode,
2328 V8SImode,
2329 V4DImode,
2330 V2TImode,
2331 V2SFmode,
2332 V4SFmode,
2333 V2DFmode,
2334 V8SFmode,
2335 V4DFmode,
2336 CCmode,
2337 CCUNSmode,
2338 CCEQmode,
2341 /* Virtual regs we are interested in. */
2342 const static struct {
2343 int regno; /* register number. */
2344 const char *name; /* register name. */
2345 } virtual_regs[] = {
2346 { STACK_POINTER_REGNUM, "stack pointer:" },
2347 { TOC_REGNUM, "toc: " },
2348 { STATIC_CHAIN_REGNUM, "static chain: " },
2349 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2350 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2351 { ARG_POINTER_REGNUM, "arg pointer: " },
2352 { FRAME_POINTER_REGNUM, "frame pointer:" },
2353 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2354 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2355 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2356 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2357 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2358 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2359 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2360 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2361 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2364 fputs ("\nHard register information:\n", stderr);
2365 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2366 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2367 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2368 LAST_ALTIVEC_REGNO,
2369 "vs");
2370 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2371 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2372 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2373 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2374 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2375 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2376 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2377 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2379 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2380 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2381 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2383 fprintf (stderr,
2384 "\n"
2385 "d reg_class = %s\n"
2386 "f reg_class = %s\n"
2387 "v reg_class = %s\n"
2388 "wa reg_class = %s\n"
2389 "wb reg_class = %s\n"
2390 "wd reg_class = %s\n"
2391 "we reg_class = %s\n"
2392 "wf reg_class = %s\n"
2393 "wg reg_class = %s\n"
2394 "wh reg_class = %s\n"
2395 "wi reg_class = %s\n"
2396 "wj reg_class = %s\n"
2397 "wk reg_class = %s\n"
2398 "wl reg_class = %s\n"
2399 "wm reg_class = %s\n"
2400 "wo reg_class = %s\n"
2401 "wp reg_class = %s\n"
2402 "wq reg_class = %s\n"
2403 "wr reg_class = %s\n"
2404 "ws reg_class = %s\n"
2405 "wt reg_class = %s\n"
2406 "wu reg_class = %s\n"
2407 "wv reg_class = %s\n"
2408 "ww reg_class = %s\n"
2409 "wx reg_class = %s\n"
2410 "wy reg_class = %s\n"
2411 "wz reg_class = %s\n"
2412 "\n",
2413 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2414 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2415 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2416 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2417 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2418 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2419 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2420 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2421 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2422 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2423 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2424 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2425 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2426 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2427 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2428 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2429 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2430 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2431 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2432 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2433 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2434 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2435 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2436 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2437 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2438 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2439 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2441 nl = "\n";
2442 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2443 rs6000_debug_print_mode (m);
2445 fputs ("\n", stderr);
2447 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2449 machine_mode mode1 = print_tieable_modes[m1];
2450 bool first_time = true;
2452 nl = (const char *)0;
2453 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2455 machine_mode mode2 = print_tieable_modes[m2];
2456 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2458 if (first_time)
2460 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2461 nl = "\n";
2462 first_time = false;
2465 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2469 if (!first_time)
2470 fputs ("\n", stderr);
2473 if (nl)
2474 fputs (nl, stderr);
2476 if (rs6000_recip_control)
2478 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2480 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2481 if (rs6000_recip_bits[m])
2483 fprintf (stderr,
2484 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2485 GET_MODE_NAME (m),
2486 (RS6000_RECIP_AUTO_RE_P (m)
2487 ? "auto"
2488 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2489 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2490 ? "auto"
2491 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2494 fputs ("\n", stderr);
2497 if (rs6000_cpu_index >= 0)
2499 const char *name = processor_target_table[rs6000_cpu_index].name;
2500 HOST_WIDE_INT flags
2501 = processor_target_table[rs6000_cpu_index].target_enable;
2503 sprintf (flags_buffer, "-mcpu=%s flags", name);
2504 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2506 else
2507 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2509 if (rs6000_tune_index >= 0)
2511 const char *name = processor_target_table[rs6000_tune_index].name;
2512 HOST_WIDE_INT flags
2513 = processor_target_table[rs6000_tune_index].target_enable;
2515 sprintf (flags_buffer, "-mtune=%s flags", name);
2516 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2518 else
2519 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2521 cl_target_option_save (&cl_opts, &global_options);
2522 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2523 rs6000_isa_flags);
2525 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2526 rs6000_isa_flags_explicit);
2528 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2529 rs6000_builtin_mask);
2531 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2533 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2534 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2536 switch (rs6000_sched_costly_dep)
2538 case max_dep_latency:
2539 costly_str = "max_dep_latency";
2540 break;
2542 case no_dep_costly:
2543 costly_str = "no_dep_costly";
2544 break;
2546 case all_deps_costly:
2547 costly_str = "all_deps_costly";
2548 break;
2550 case true_store_to_load_dep_costly:
2551 costly_str = "true_store_to_load_dep_costly";
2552 break;
2554 case store_to_load_dep_costly:
2555 costly_str = "store_to_load_dep_costly";
2556 break;
2558 default:
2559 costly_str = costly_num;
2560 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2561 break;
2564 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2566 switch (rs6000_sched_insert_nops)
2568 case sched_finish_regroup_exact:
2569 nop_str = "sched_finish_regroup_exact";
2570 break;
2572 case sched_finish_pad_groups:
2573 nop_str = "sched_finish_pad_groups";
2574 break;
2576 case sched_finish_none:
2577 nop_str = "sched_finish_none";
2578 break;
2580 default:
2581 nop_str = nop_num;
2582 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2583 break;
2586 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2588 switch (rs6000_sdata)
2590 default:
2591 case SDATA_NONE:
2592 break;
2594 case SDATA_DATA:
2595 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2596 break;
2598 case SDATA_SYSV:
2599 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2600 break;
2602 case SDATA_EABI:
2603 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2604 break;
2608 switch (rs6000_traceback)
2610 case traceback_default: trace_str = "default"; break;
2611 case traceback_none: trace_str = "none"; break;
2612 case traceback_part: trace_str = "part"; break;
2613 case traceback_full: trace_str = "full"; break;
2614 default: trace_str = "unknown"; break;
2617 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2619 switch (rs6000_current_cmodel)
2621 case CMODEL_SMALL: cmodel_str = "small"; break;
2622 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2623 case CMODEL_LARGE: cmodel_str = "large"; break;
2624 default: cmodel_str = "unknown"; break;
2627 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2629 switch (rs6000_current_abi)
2631 case ABI_NONE: abi_str = "none"; break;
2632 case ABI_AIX: abi_str = "aix"; break;
2633 case ABI_ELFv2: abi_str = "ELFv2"; break;
2634 case ABI_V4: abi_str = "V4"; break;
2635 case ABI_DARWIN: abi_str = "darwin"; break;
2636 default: abi_str = "unknown"; break;
2639 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2641 if (rs6000_altivec_abi)
2642 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2644 if (rs6000_spe_abi)
2645 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2647 if (rs6000_darwin64_abi)
2648 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2650 if (rs6000_float_gprs)
2651 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2653 fprintf (stderr, DEBUG_FMT_S, "fprs",
2654 (TARGET_FPRS ? "true" : "false"));
2656 fprintf (stderr, DEBUG_FMT_S, "single_float",
2657 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2659 fprintf (stderr, DEBUG_FMT_S, "double_float",
2660 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2662 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2663 (TARGET_SOFT_FLOAT ? "true" : "false"));
2665 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2666 (TARGET_E500_SINGLE ? "true" : "false"));
2668 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2669 (TARGET_E500_DOUBLE ? "true" : "false"));
2671 if (TARGET_LINK_STACK)
2672 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2674 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2676 if (TARGET_P8_FUSION)
2678 char options[80];
2680 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2681 if (TARGET_TOC_FUSION)
2682 strcat (options, ", toc");
2684 if (TARGET_P8_FUSION_SIGN)
2685 strcat (options, ", sign");
2687 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2690 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2691 TARGET_SECURE_PLT ? "secure" : "bss");
2692 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2693 aix_struct_return ? "aix" : "sysv");
2694 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2695 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2696 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2697 tf[!!rs6000_align_branch_targets]);
2698 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2699 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2700 rs6000_long_double_type_size);
2701 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2702 (int)rs6000_sched_restricted_insns_priority);
2703 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2704 (int)END_BUILTINS);
2705 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2706 (int)RS6000_BUILTIN_COUNT);
2708 if (TARGET_VSX)
2709 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2710 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2712 if (TARGET_DIRECT_MOVE_128)
2713 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2714 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2718 /* Update the addr mask bits in reg_addr to help secondary reload and the
2719 legitimate address support figure out the appropriate addressing to
2720 use. */
2722 static void
2723 rs6000_setup_reg_addr_masks (void)
2725 ssize_t rc, reg, m, nregs;
2726 addr_mask_type any_addr_mask, addr_mask;
2728 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2730 machine_mode m2 = (machine_mode) m;
2731 bool complex_p = false;
2732 size_t msize;
2734 if (COMPLEX_MODE_P (m2))
2736 complex_p = true;
2737 m2 = GET_MODE_INNER (m2);
2740 msize = GET_MODE_SIZE (m2);
2742 /* SDmode is special in that we want to access it only via REG+REG
2743 addressing on power7 and above, since we want to use the LFIWZX and
2744 STFIWX instructions to load and store it. */
2745 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2747 any_addr_mask = 0;
2748 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2750 addr_mask = 0;
2751 reg = reload_reg_map[rc].reg;
2753 /* Can mode values go in the GPR/FPR/Altivec registers? */
2754 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2756 nregs = rs6000_hard_regno_nregs[m][reg];
2757 addr_mask |= RELOAD_REG_VALID;
2759 /* Indicate if the mode takes more than 1 physical register. If
2760 it takes a single register, indicate it can do REG+REG
2761 addressing. */
2762 if (nregs > 1 || m == BLKmode || complex_p)
2763 addr_mask |= RELOAD_REG_MULTIPLE;
2764 else
2765 addr_mask |= RELOAD_REG_INDEXED;
2767 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2768 addressing. Restrict addressing on SPE for 64-bit types
2769 because of the SUBREG hackery used to address 64-bit floats in
2770 '32-bit' GPRs. If we allow scalars into Altivec registers,
2771 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2773 if (TARGET_UPDATE
2774 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2775 && msize <= 8
2776 && !VECTOR_MODE_P (m2)
2777 && !FLOAT128_VECTOR_P (m2)
2778 && !complex_p
2779 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2780 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2781 && !(TARGET_E500_DOUBLE && msize == 8))
2783 addr_mask |= RELOAD_REG_PRE_INCDEC;
2785 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2786 we don't allow PRE_MODIFY for some multi-register
2787 operations. */
2788 switch (m)
2790 default:
2791 addr_mask |= RELOAD_REG_PRE_MODIFY;
2792 break;
2794 case DImode:
2795 if (TARGET_POWERPC64)
2796 addr_mask |= RELOAD_REG_PRE_MODIFY;
2797 break;
2799 case DFmode:
2800 case DDmode:
2801 if (TARGET_DF_INSN)
2802 addr_mask |= RELOAD_REG_PRE_MODIFY;
2803 break;
2808 /* GPR and FPR registers can do REG+OFFSET addressing, except
2809 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2810 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2811 if ((addr_mask != 0) && !indexed_only_p
2812 && msize <= 8
2813 && (rc == RELOAD_REG_GPR
2814 || ((msize == 8 || m2 == SFmode)
2815 && (rc == RELOAD_REG_FPR
2816 || (rc == RELOAD_REG_VMX
2817 && TARGET_P9_DFORM_SCALAR)))))
2818 addr_mask |= RELOAD_REG_OFFSET;
2820 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2821 instructions are enabled. The offset for 128-bit VSX registers is
2822 only 12 bits. While GPRs can handle the full offset range, VSX
2823 registers can only handle the restricted range. */
2824 else if ((addr_mask != 0) && !indexed_only_p
2825 && msize == 16 && TARGET_P9_DFORM_VECTOR
2826 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2827 || (m2 == TImode && TARGET_VSX_TIMODE)))
2829 addr_mask |= RELOAD_REG_OFFSET;
2830 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2831 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2834 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2835 addressing on 128-bit types. */
2836 if (rc == RELOAD_REG_VMX && msize == 16
2837 && (addr_mask & RELOAD_REG_VALID) != 0)
2838 addr_mask |= RELOAD_REG_AND_M16;
2840 reg_addr[m].addr_mask[rc] = addr_mask;
2841 any_addr_mask |= addr_mask;
2844 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
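/* Illustrative consumer sketch (not compiled): once the masks are set up,
   an addressing question becomes a simple table test, e.g. whether DFmode
   supports REG+OFFSET addressing in the FPRs.  */
#if 0
  bool dform_p = ((reg_addr[DFmode].addr_mask[RELOAD_REG_FPR]
		   & RELOAD_REG_OFFSET) != 0);
#endif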
2849 /* Initialize the various global tables that are based on register size. */
2850 static void
2851 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2853 ssize_t r, m, c;
2854 int align64;
2855 int align32;
2857 /* Precalculate REGNO_REG_CLASS. */
2858 rs6000_regno_regclass[0] = GENERAL_REGS;
2859 for (r = 1; r < 32; ++r)
2860 rs6000_regno_regclass[r] = BASE_REGS;
2862 for (r = 32; r < 64; ++r)
2863 rs6000_regno_regclass[r] = FLOAT_REGS;
2865 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2866 rs6000_regno_regclass[r] = NO_REGS;
2868 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2869 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2871 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2872 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2873 rs6000_regno_regclass[r] = CR_REGS;
2875 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2876 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2877 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2878 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2879 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2880 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2881 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2882 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2883 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2884 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2885 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2886 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2888 /* Precalculate the mapping from register class to simpler reload register class. We don't
2889 need all of the register classes that are combinations of different
2890 classes, just the simple ones that have constraint letters. */
2891 for (c = 0; c < N_REG_CLASSES; c++)
2892 reg_class_to_reg_type[c] = NO_REG_TYPE;
2894 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2895 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2896 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2897 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2898 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2899 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2900 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2901 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2902 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2903 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2904 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2905 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2907 if (TARGET_VSX)
2909 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2910 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2912 else
2914 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2915 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2918 /* Precalculate the valid memory formats as well as the vector information;
2919 this must be set up before the rs6000_hard_regno_nregs_internal calls
2920 below. */
2921 gcc_assert ((int)VECTOR_NONE == 0);
2922 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2923 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
2925 gcc_assert ((int)CODE_FOR_nothing == 0);
2926 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2928 gcc_assert ((int)NO_REGS == 0);
2929 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2931 /* The VSX hardware allows native alignment for vectors, but we control whether
2932 the compiler believes it can use native alignment or must use 128-bit alignment. */
2933 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2935 align64 = 64;
2936 align32 = 32;
2938 else
2940 align64 = 128;
2941 align32 = 128;
2944 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
2945 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
2946 if (TARGET_FLOAT128)
2948 rs6000_vector_mem[KFmode] = VECTOR_VSX;
2949 rs6000_vector_align[KFmode] = 128;
2951 if (FLOAT128_IEEE_P (TFmode))
2953 rs6000_vector_mem[TFmode] = VECTOR_VSX;
2954 rs6000_vector_align[TFmode] = 128;
2958 /* V2DF mode, VSX only. */
2959 if (TARGET_VSX)
2961 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2962 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2963 rs6000_vector_align[V2DFmode] = align64;
2966 /* V4SF mode, either VSX or Altivec. */
2967 if (TARGET_VSX)
2969 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2970 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2971 rs6000_vector_align[V4SFmode] = align32;
2973 else if (TARGET_ALTIVEC)
2975 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2976 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2977 rs6000_vector_align[V4SFmode] = align32;
2980 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2981 and stores. */
2982 if (TARGET_ALTIVEC)
2984 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2985 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2986 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2987 rs6000_vector_align[V4SImode] = align32;
2988 rs6000_vector_align[V8HImode] = align32;
2989 rs6000_vector_align[V16QImode] = align32;
2991 if (TARGET_VSX)
2993 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2994 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2995 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2997 else
2999 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3000 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3001 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3005 /* V2DImode: full support depends on the ISA 2.07 vector mode, but allow it
3006 under VSX for insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3007 if (TARGET_VSX)
3009 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3010 rs6000_vector_unit[V2DImode]
3011 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3012 rs6000_vector_align[V2DImode] = align64;
3014 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3015 rs6000_vector_unit[V1TImode]
3016 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3017 rs6000_vector_align[V1TImode] = 128;
3020 /* DFmode, see if we want to use the VSX unit. Memory is handled
3021 differently, so don't set rs6000_vector_mem. */
3022 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3024 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3025 rs6000_vector_align[DFmode] = 64;
3028 /* SFmode, see if we want to use the VSX unit. */
3029 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3031 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3032 rs6000_vector_align[SFmode] = 32;
3035 /* Allow TImode in VSX register and set the VSX memory macros. */
3036 if (TARGET_VSX && TARGET_VSX_TIMODE)
3038 rs6000_vector_mem[TImode] = VECTOR_VSX;
3039 rs6000_vector_align[TImode] = align64;
3042 /* TODO add SPE and paired floating point vector support. */
3044 /* Register class constraints for the constraints that depend on compile
3045 switches. When the VSX code was added, different constraints were added
3046 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3047 of the VSX registers are used. The register classes for scalar floating
3048 point types are set based on whether we allow that type into the upper
3049 (Altivec) registers. GCC has register classes to target the Altivec
3050 registers for load/store operations, to select using a VSX memory
3051 operation instead of the traditional floating point operation. The
3052 constraints are:
3054 d - Register class to use with traditional DFmode instructions.
3055 f - Register class to use with traditional SFmode instructions.
3056 v - Altivec register.
3057 wa - Any VSX register.
3058 wc - Reserved to represent individual CR bits (used in LLVM).
3059 wd - Preferred register class for V2DFmode.
3060 wf - Preferred register class for V4SFmode.
3061 wg - Float register for power6x move insns.
3062 wh - FP register for direct move instructions.
3063 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3064 wj - FP or VSX register to hold 64-bit integers for direct moves.
3065 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3066 wl - Float register if we can do 32-bit signed int loads.
3067 wm - VSX register for ISA 2.07 direct move operations.
3068 wn - always NO_REGS.
3069 wr - GPR if 64-bit mode is permitted.
3070 ws - Register class to do ISA 2.06 DF operations.
3071 wt - VSX register for TImode in VSX registers.
3072 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3073 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3074 ww - Register class to do SF conversions in with VSX operations.
3075 wx - Float register if we can do 32-bit int stores.
3076 wy - Register class to do ISA 2.07 SF operations.
3077 wz - Float register if we can do 32-bit unsigned int loads. */
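/* Illustrative sketch (user-level view, not part of this file's logic):
   the constraint letters above are what inline asm operands ultimately
   map to, e.g. forcing an operand into any VSX register with "wa".  */
#if 0
  __vector double v;
  __asm__ ("xvabsdp %x0,%x1" : "=wa" (v) : "wa" (v));
#endif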
3079 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3080 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3082 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3083 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3085 if (TARGET_VSX)
3087 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3088 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3089 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3091 if (TARGET_VSX_TIMODE)
3092 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3094 if (TARGET_UPPER_REGS_DF) /* DFmode */
3096 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3097 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3099 else
3100 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3102 if (TARGET_UPPER_REGS_DF) /* DImode */
3103 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3104 else
3105 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3108 /* Add conditional constraints based on various options, to allow us to
3109 collapse multiple insn patterns. */
3110 if (TARGET_ALTIVEC)
3111 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3113 if (TARGET_MFPGPR) /* DFmode */
3114 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3116 if (TARGET_LFIWAX)
3117 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3119 if (TARGET_DIRECT_MOVE)
3121 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3122 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3123 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3124 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3125 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3126 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3129 if (TARGET_POWERPC64)
3130 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3132 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3134 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3135 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3136 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3138 else if (TARGET_P8_VECTOR)
3140 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3141 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3143 else if (TARGET_VSX)
3144 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3146 if (TARGET_STFIWX)
3147 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3149 if (TARGET_LFIWZX)
3150 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3152 if (TARGET_FLOAT128)
3154 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3155 if (FLOAT128_IEEE_P (TFmode))
3156 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3159 /* Support for new D-form instructions. */
3160 if (TARGET_P9_DFORM_SCALAR)
3161 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3163 /* Support for ISA 3.0 (power9) vectors. */
3164 if (TARGET_P9_VECTOR)
3165 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3167 /* Support for new direct moves (ISA 3.0 + 64bit). */
3168 if (TARGET_DIRECT_MOVE_128)
3169 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3171 /* Set up the reload helper and direct move functions. */
3172 if (TARGET_VSX || TARGET_ALTIVEC)
3174 if (TARGET_64BIT)
3176 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3177 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3178 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3179 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3180 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3181 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3182 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3183 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3184 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3185 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3186 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3187 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3188 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3189 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3190 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3191 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3192 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3193 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3194 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3195 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3197 if (FLOAT128_VECTOR_P (KFmode))
3199 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3200 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3203 if (FLOAT128_VECTOR_P (TFmode))
3205 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3206 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3209 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3210 available. */
3211 if (TARGET_NO_SDMODE_STACK)
3213 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3214 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3217 if (TARGET_VSX_TIMODE)
3219 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3220 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3223 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3225 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3226 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3227 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3228 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3229 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3230 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3231 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3232 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3233 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3235 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3236 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3237 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3238 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3239 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3240 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3241 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3242 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3243 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3245 if (FLOAT128_VECTOR_P (KFmode))
3247 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3248 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3251 if (FLOAT128_VECTOR_P (TFmode))
3253 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3254 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3258 else
3260 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3261 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3262 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3263 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3264 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3265 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3266 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3267 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3268 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3269 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3270 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3271 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3272 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3273 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3274 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3275 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3276 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3277 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3278 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3279 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3281 if (FLOAT128_VECTOR_P (KFmode))
3283 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3284 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3287 if (FLOAT128_IEEE_P (TFmode))
3289 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3290 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3293 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3294 available. */
3295 if (TARGET_NO_SDMODE_STACK)
3297 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3298 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3301 if (TARGET_VSX_TIMODE)
3303 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3304 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3307 if (TARGET_DIRECT_MOVE)
3309 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3310 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3311 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3315 if (TARGET_UPPER_REGS_DF)
3316 reg_addr[DFmode].scalar_in_vmx_p = true;
3318 if (TARGET_UPPER_REGS_DI)
3319 reg_addr[DImode].scalar_in_vmx_p = true;
3321 if (TARGET_UPPER_REGS_SF)
3322 reg_addr[SFmode].scalar_in_vmx_p = true;
3325 /* Set up the fusion operations. */
3326 if (TARGET_P8_FUSION)
3328 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3329 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3330 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3331 if (TARGET_64BIT)
3332 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
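/* Illustrative sketch of the power8 fusion idiom these patterns keep
   together (the symbol is a made-up example):
	addis 9,2,.LC0@toc@ha
	lwz 9,.LC0@toc@l(9)
   Keeping the addis and the dependent load adjacent lets the hardware
   fuse them into a single operation.  */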
3335 if (TARGET_P9_FUSION)
3337 struct fuse_insns {
3338 enum machine_mode mode; /* mode of the fused type. */
3339 enum machine_mode pmode; /* pointer mode. */
3340 enum rs6000_reload_reg_type rtype; /* register type. */
3341 enum insn_code load; /* load insn. */
3342 enum insn_code store; /* store insn. */
3345 static const struct fuse_insns addis_insns[] = {
3346 { SFmode, DImode, RELOAD_REG_FPR,
3347 CODE_FOR_fusion_fpr_di_sf_load,
3348 CODE_FOR_fusion_fpr_di_sf_store },
3350 { SFmode, SImode, RELOAD_REG_FPR,
3351 CODE_FOR_fusion_fpr_si_sf_load,
3352 CODE_FOR_fusion_fpr_si_sf_store },
3354 { DFmode, DImode, RELOAD_REG_FPR,
3355 CODE_FOR_fusion_fpr_di_df_load,
3356 CODE_FOR_fusion_fpr_di_df_store },
3358 { DFmode, SImode, RELOAD_REG_FPR,
3359 CODE_FOR_fusion_fpr_si_df_load,
3360 CODE_FOR_fusion_fpr_si_df_store },
3362 { DImode, DImode, RELOAD_REG_FPR,
3363 CODE_FOR_fusion_fpr_di_di_load,
3364 CODE_FOR_fusion_fpr_di_di_store },
3366 { DImode, SImode, RELOAD_REG_FPR,
3367 CODE_FOR_fusion_fpr_si_di_load,
3368 CODE_FOR_fusion_fpr_si_di_store },
3370 { QImode, DImode, RELOAD_REG_GPR,
3371 CODE_FOR_fusion_gpr_di_qi_load,
3372 CODE_FOR_fusion_gpr_di_qi_store },
3374 { QImode, SImode, RELOAD_REG_GPR,
3375 CODE_FOR_fusion_gpr_si_qi_load,
3376 CODE_FOR_fusion_gpr_si_qi_store },
3378 { HImode, DImode, RELOAD_REG_GPR,
3379 CODE_FOR_fusion_gpr_di_hi_load,
3380 CODE_FOR_fusion_gpr_di_hi_store },
3382 { HImode, SImode, RELOAD_REG_GPR,
3383 CODE_FOR_fusion_gpr_si_hi_load,
3384 CODE_FOR_fusion_gpr_si_hi_store },
3386 { SImode, DImode, RELOAD_REG_GPR,
3387 CODE_FOR_fusion_gpr_di_si_load,
3388 CODE_FOR_fusion_gpr_di_si_store },
3390 { SImode, SImode, RELOAD_REG_GPR,
3391 CODE_FOR_fusion_gpr_si_si_load,
3392 CODE_FOR_fusion_gpr_si_si_store },
3394 { SFmode, DImode, RELOAD_REG_GPR,
3395 CODE_FOR_fusion_gpr_di_sf_load,
3396 CODE_FOR_fusion_gpr_di_sf_store },
3398 { SFmode, SImode, RELOAD_REG_GPR,
3399 CODE_FOR_fusion_gpr_si_sf_load,
3400 CODE_FOR_fusion_gpr_si_sf_store },
3402 { DImode, DImode, RELOAD_REG_GPR,
3403 CODE_FOR_fusion_gpr_di_di_load,
3404 CODE_FOR_fusion_gpr_di_di_store },
3406 { DFmode, DImode, RELOAD_REG_GPR,
3407 CODE_FOR_fusion_gpr_di_df_load,
3408 CODE_FOR_fusion_gpr_di_df_store },
3411 enum machine_mode cur_pmode = Pmode;
3412 size_t i;
3414 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3416 enum machine_mode xmode = addis_insns[i].mode;
3417 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3419 if (addis_insns[i].pmode != cur_pmode)
3420 continue;
3422 if (rtype == RELOAD_REG_FPR
3423 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3424 continue;
3426 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3427 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
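  /* Entries whose pointer mode does not match the current Pmode are skipped
     by the loop above, so for any given compilation only the SImode (32-bit
     pointer) or DImode (64-bit pointer) variants of each fusion pattern are
     registered.  */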
3431 /* Note the types for which we support fusing a TOC setup with a memory
3432 insn. We only do fused TOCs for medium/large code models. */
3433 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3434 && (TARGET_CMODEL != CMODEL_SMALL))
3436 reg_addr[QImode].fused_toc = true;
3437 reg_addr[HImode].fused_toc = true;
3438 reg_addr[SImode].fused_toc = true;
3439 reg_addr[DImode].fused_toc = true;
3440 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3442 if (TARGET_SINGLE_FLOAT)
3443 reg_addr[SFmode].fused_toc = true;
3444 if (TARGET_DOUBLE_FLOAT)
3445 reg_addr[DFmode].fused_toc = true;
3449 /* Precalculate HARD_REGNO_NREGS. */
3450 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3451 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3452 rs6000_hard_regno_nregs[m][r]
3453 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3455 /* Precalculate HARD_REGNO_MODE_OK. */
3456 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3457 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3458 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3459 rs6000_hard_regno_mode_ok_p[m][r] = true;
3461 /* Precalculate CLASS_MAX_NREGS sizes. */
3462 for (c = 0; c < LIM_REG_CLASSES; ++c)
3464 int reg_size;
3466 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3467 reg_size = UNITS_PER_VSX_WORD;
3469 else if (c == ALTIVEC_REGS)
3470 reg_size = UNITS_PER_ALTIVEC_WORD;
3472 else if (c == FLOAT_REGS)
3473 reg_size = UNITS_PER_FP_WORD;
3475 else
3476 reg_size = UNITS_PER_WORD;
3478 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3480 machine_mode m2 = (machine_mode)m;
3481 int reg_size2 = reg_size;
3483 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3484 in VSX. */
3485 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3486 reg_size2 = UNITS_PER_FP_WORD;
3488 rs6000_class_max_nregs[m][c]
3489 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
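  /* Worked example of the computation above: V2DFmode is 16 bytes, so with
     8-byte FP registers the FLOAT_REGS entry is (16 + 8 - 1) / 8 = 2
     registers, while a VSX class with 16-byte registers needs just 1.  */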
3493 if (TARGET_E500_DOUBLE)
3494 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
3496 /* Calculate which modes to automatically generate code to use the
3497 reciprocal divide and square root instructions. In the future, possibly
3498 automatically generate the instructions even if the user did not specify
3499 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3500 not accurate enough. */
3501 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3502 if (TARGET_FRES)
3503 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3504 if (TARGET_FRE)
3505 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3506 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3507 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3508 if (VECTOR_UNIT_VSX_P (V2DFmode))
3509 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3511 if (TARGET_FRSQRTES)
3512 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3513 if (TARGET_FRSQRTE)
3514 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3515 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3516 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3517 if (VECTOR_UNIT_VSX_P (V2DFmode))
3518 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3520 if (rs6000_recip_control)
3522 if (!flag_finite_math_only)
3523 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3524 if (flag_trapping_math)
3525 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3526 if (!flag_reciprocal_math)
3527 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3528 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3530 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3531 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3532 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3534 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3535 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3536 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3538 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3539 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3540 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3542 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3543 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3544 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3547 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3548 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3550 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3551 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3552 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3554 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3555 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3556 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3558 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3559 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3560 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
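  /* To summarize the flags above: the HAVE_RE/HAVE_RSQRTE bits record that
     the estimate instruction exists for a mode, while the AUTO_* bits, set
     only when -mrecip and the required fast-math style flags are all
     present, allow the compiler to use the estimates automatically.  */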
3564 /* Update the addr mask bits in reg_addr to help the secondary reload and
3565 legitimate-address machinery figure out the appropriate addressing to
3566 use. */
3567 rs6000_setup_reg_addr_masks ();
3569 if (global_init_p || TARGET_DEBUG_TARGET)
3571 if (TARGET_DEBUG_REG)
3572 rs6000_debug_reg_global ();
3574 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3575 fprintf (stderr,
3576 "SImode variable mult cost = %d\n"
3577 "SImode constant mult cost = %d\n"
3578 "SImode short constant mult cost = %d\n"
3579 "DImode multipliciation cost = %d\n"
3580 "SImode division cost = %d\n"
3581 "DImode division cost = %d\n"
3582 "Simple fp operation cost = %d\n"
3583 "DFmode multiplication cost = %d\n"
3584 "SFmode division cost = %d\n"
3585 "DFmode division cost = %d\n"
3586 "cache line size = %d\n"
3587 "l1 cache size = %d\n"
3588 "l2 cache size = %d\n"
3589 "simultaneous prefetches = %d\n"
3590 "\n",
3591 rs6000_cost->mulsi,
3592 rs6000_cost->mulsi_const,
3593 rs6000_cost->mulsi_const9,
3594 rs6000_cost->muldi,
3595 rs6000_cost->divsi,
3596 rs6000_cost->divdi,
3597 rs6000_cost->fp,
3598 rs6000_cost->dmul,
3599 rs6000_cost->sdiv,
3600 rs6000_cost->ddiv,
3601 rs6000_cost->cache_line_size,
3602 rs6000_cost->l1_cache_size,
3603 rs6000_cost->l2_cache_size,
3604 rs6000_cost->simultaneous_prefetches);
3608 #if TARGET_MACHO
3609 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3611 static void
3612 darwin_rs6000_override_options (void)
3614 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3615 off. */
3616 rs6000_altivec_abi = 1;
3617 TARGET_ALTIVEC_VRSAVE = 1;
3618 rs6000_current_abi = ABI_DARWIN;
3620 if (DEFAULT_ABI == ABI_DARWIN
3621 && TARGET_64BIT)
3622 darwin_one_byte_bool = 1;
3624 if (TARGET_64BIT && ! TARGET_POWERPC64)
3626 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3627 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3629 if (flag_mkernel)
3631 rs6000_default_long_calls = 1;
3632 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3635 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3636 Altivec. */
3637 if (!flag_mkernel && !flag_apple_kext
3638 && TARGET_64BIT
3639 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3640 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3642 /* Unless the user (not the configurer) has explicitly overridden
3643 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3644 G4 unless targeting the kernel. */
3645 if (!flag_mkernel
3646 && !flag_apple_kext
3647 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3648 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3649 && ! global_options_set.x_rs6000_cpu_index)
3651 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654 #endif
3656 /* If not otherwise specified by a target, make 'long double' equivalent to
3657 'double'. */
3659 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3660 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3661 #endif
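/* Note (added): subtarget headers may predefine
   RS6000_DEFAULT_LONG_DOUBLE_SIZE before this point; e.g. 64-bit GNU/Linux
   is believed to set it to 128, making long double the 128-bit IBM extended
   type there by default.  */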
3663 /* Return the builtin mask for the various options used that could affect
3664 which builtins are enabled. In the past we used target_flags, but we've run
3665 out of bits, and some options like SPE and PAIRED are no longer in
3666 target_flags. */
3668 HOST_WIDE_INT
3669 rs6000_builtin_mask_calculate (void)
3671 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3672 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3673 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3674 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3675 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3676 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3677 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3678 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3679 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3680 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3681 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3682 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3683 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3684 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3685 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3686 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3687 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3688 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3689 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3690 | ((TARGET_FLOAT128) ? RS6000_BTM_FLOAT128 : 0));
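/* A minimal usage sketch (hypothetical caller, not from this file): builtin
   expansion can test individual bits against the computed mask, e.g.

     HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
     bool have_altivec = (mask & RS6000_BTM_ALTIVEC) != 0;

   so any change to the contributing TARGET_* flags changes which builtins
   are usable.  */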
3693 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3694 to clobber the XER[CA] bit because clobbering that bit without telling
3695 the compiler worked just fine with versions of GCC before GCC 5, and
3696 breaking a lot of older code in ways that are hard to track down is
3697 not such a great idea. */
3699 static rtx_insn *
3700 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3701 vec<const char *> &/*constraints*/,
3702 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3704 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3705 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3706 return NULL;
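/* Illustrative consequence (the example asm is ours, not from the source):
   an asm statement that uses carry-consuming instructions, such as

     asm ("addic %0,%1,-1\n\tsubfe %0,%0,%1" : "=&r" (x) : "r" (y));

   remains correct even though it never lists XER[CA] in its clobbers,
   because every asm is treated as clobbering that bit.  */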
3709 /* Override command line options. Mostly we process the processor type and
3710 sometimes adjust other TARGET_ options. */
3712 static bool
3713 rs6000_option_override_internal (bool global_init_p)
3715 bool ret = true;
3716 bool have_cpu = false;
3718 /* The default cpu requested at configure time, if any. */
3719 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3721 HOST_WIDE_INT set_masks;
3722 int cpu_index;
3723 int tune_index;
3724 struct cl_target_option *main_target_opt
3725 = ((global_init_p || target_option_default_node == NULL)
3726 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3728 /* Print defaults. */
3729 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3730 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3732 /* Remember the explicit arguments. */
3733 if (global_init_p)
3734 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3736 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3737 library functions, so warn about it. The flag may be useful for
3738 performance studies from time to time though, so don't disable it
3739 entirely. */
3740 if (global_options_set.x_rs6000_alignment_flags
3741 && rs6000_alignment_flags == MASK_ALIGN_POWER
3742 && DEFAULT_ABI == ABI_DARWIN
3743 && TARGET_64BIT)
3744 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3745 " it is incompatible with the installed C and C++ libraries");
3747 /* Numerous experiments show that IRA-based loop pressure
3748 calculation works better for RTL loop-invariant motion on targets
3749 with enough (>= 32) registers. It is an expensive optimization,
3750 so it is enabled only when optimizing for peak performance. */
3751 if (optimize >= 3 && global_init_p
3752 && !global_options_set.x_flag_ira_loop_pressure)
3753 flag_ira_loop_pressure = 1;
3755 /* Set the pointer size. */
3756 if (TARGET_64BIT)
3758 rs6000_pmode = (int)DImode;
3759 rs6000_pointer_size = 64;
3761 else
3763 rs6000_pmode = (int)SImode;
3764 rs6000_pointer_size = 32;
3767 /* Some OSs don't support saving the high part of 64-bit registers on context
3768 switch. Other OSs don't support saving Altivec registers. On those OSs,
3769 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3770 if the user wants either, the user must explicitly specify them and we
3771 won't interfere with the user's specification. */
3773 set_masks = POWERPC_MASKS;
3774 #ifdef OS_MISSING_POWERPC64
3775 if (OS_MISSING_POWERPC64)
3776 set_masks &= ~OPTION_MASK_POWERPC64;
3777 #endif
3778 #ifdef OS_MISSING_ALTIVEC
3779 if (OS_MISSING_ALTIVEC)
3780 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3781 #endif
3783 /* Don't let the processor default override options given explicitly. */
3784 set_masks &= ~rs6000_isa_flags_explicit;
3786 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3787 the cpu in a target attribute or pragma, but did not specify a tuning
3788 option, use the cpu for the tuning option rather than the option specified
3789 with -mtune on the command line. Process a '--with-cpu' configuration
3790 request as an implicit --cpu. */
3791 if (rs6000_cpu_index >= 0)
3793 cpu_index = rs6000_cpu_index;
3794 have_cpu = true;
3796 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3798 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3799 have_cpu = true;
3801 else if (implicit_cpu)
3803 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3804 have_cpu = true;
3806 else
3808 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3809 const char *default_cpu = ((!TARGET_POWERPC64)
3810 ? "powerpc"
3811 : ((BYTES_BIG_ENDIAN)
3812 ? "powerpc64"
3813 : "powerpc64le"));
3815 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3816 have_cpu = false;
3819 gcc_assert (cpu_index >= 0);
3821 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3822 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3823 with those from the cpu, except for options that were explicitly set. If
3824 we don't have a cpu, do not override the target bits set in
3825 TARGET_DEFAULT. */
3826 if (have_cpu)
3828 rs6000_isa_flags &= ~set_masks;
3829 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3830 & set_masks);
3832 else
3834 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3835 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3836 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3837 to using rs6000_isa_flags, we need to do the initialization here.
3839 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3840 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3841 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
3842 : processor_target_table[cpu_index].target_enable);
3843 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3846 if (rs6000_tune_index >= 0)
3847 tune_index = rs6000_tune_index;
3848 else if (have_cpu)
3850 /* Until power9 tuning is available, use power8 tuning if -mcpu=power9. */
3851 if (processor_target_table[cpu_index].processor != PROCESSOR_POWER9)
3852 rs6000_tune_index = tune_index = cpu_index;
3853 else
3855 size_t i;
3856 tune_index = -1;
3857 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3858 if (processor_target_table[i].processor == PROCESSOR_POWER8)
3860 rs6000_tune_index = tune_index = i;
3861 break;
3865 else
3867 size_t i;
3868 enum processor_type tune_proc
3869 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3871 tune_index = -1;
3872 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3873 if (processor_target_table[i].processor == tune_proc)
3875 rs6000_tune_index = tune_index = i;
3876 break;
3880 gcc_assert (tune_index >= 0);
3881 rs6000_cpu = processor_target_table[tune_index].processor;
3883 /* Pick defaults for the SPE-related control flags. Do this early to make
3884 sure that the TARGET_ macros are representative as soon as possible. */
3886 int spe_capable_cpu =
3887 (rs6000_cpu == PROCESSOR_PPC8540
3888 || rs6000_cpu == PROCESSOR_PPC8548);
3890 if (!global_options_set.x_rs6000_spe_abi)
3891 rs6000_spe_abi = spe_capable_cpu;
3893 if (!global_options_set.x_rs6000_spe)
3894 rs6000_spe = spe_capable_cpu;
3896 if (!global_options_set.x_rs6000_float_gprs)
3897 rs6000_float_gprs =
3898 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3899 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3900 : 0);
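  /* The rs6000_float_gprs values mirror -mfloat-gprs=: 1 requests
     single-precision floats in GPRs (e500v1/PPC8540), 2 requests single and
     double precision in GPRs (e500v2/PPC8548), and 0 disables GPR floats.  */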
3903 if (global_options_set.x_rs6000_spe_abi
3904 && rs6000_spe_abi
3905 && !TARGET_SPE_ABI)
3906 error ("not configured for SPE ABI");
3908 if (global_options_set.x_rs6000_spe
3909 && rs6000_spe
3910 && !TARGET_SPE)
3911 error ("not configured for SPE instruction set");
3913 if (main_target_opt != NULL
3914 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3915 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3916 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3917 error ("target attribute or pragma changes SPE ABI");
3919 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3920 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3921 || rs6000_cpu == PROCESSOR_PPCE5500)
3923 if (TARGET_ALTIVEC)
3924 error ("AltiVec not supported in this target");
3925 if (TARGET_SPE)
3926 error ("SPE not supported in this target");
3928 if (rs6000_cpu == PROCESSOR_PPCE6500)
3930 if (TARGET_SPE)
3931 error ("SPE not supported in this target");
3934 /* Disable Cell microcode if we are optimizing for the Cell
3935 and not optimizing for size. */
3936 if (rs6000_gen_cell_microcode == -1)
3937 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3938 && !optimize_size);
3940 /* If we are optimizing big endian systems for space and it's OK to
3941 use instructions that would be microcoded on the Cell, use the
3942 load/store multiple and string instructions. */
3943 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3944 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3945 | OPTION_MASK_STRING);
3947 /* Don't allow -mmultiple or -mstring on little endian systems
3948 unless the cpu is a 750, because the hardware doesn't support the
3949 instructions used in little endian mode, and their use causes an
3950 alignment trap. The 750 does not cause an alignment trap (except
3951 when the target is unaligned). */
3953 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3955 if (TARGET_MULTIPLE)
3957 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3958 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3959 warning (0, "-mmultiple is not supported on little endian systems");
3962 if (TARGET_STRING)
3964 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3965 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3966 warning (0, "-mstring is not supported on little endian systems");
3970 /* If little-endian, default to -mstrict-align on older processors.
3971 Testing for htm matches power8 and later. */
3972 if (!BYTES_BIG_ENDIAN
3973 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3974 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3976 /* -maltivec={le,be} implies -maltivec. */
3977 if (rs6000_altivec_element_order != 0)
3978 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3980 /* Disallow -maltivec=le in big endian mode for now. This is not
3981 known to be useful for anyone. */
3982 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3984 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3985 rs6000_altivec_element_order = 0;
3988 /* Add some warnings for VSX. */
3989 if (TARGET_VSX)
3991 const char *msg = NULL;
3992 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3993 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3995 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3996 msg = N_("-mvsx requires hardware floating point");
3997 else
3999 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4000 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4003 else if (TARGET_PAIRED_FLOAT)
4004 msg = N_("-mvsx and -mpaired are incompatible");
4005 else if (TARGET_AVOID_XFORM > 0)
4006 msg = N_("-mvsx needs indexed addressing");
4007 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4008 & OPTION_MASK_ALTIVEC))
4010 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4011 msg = N_("-mvsx and -mno-altivec are incompatible");
4012 else
4013 msg = N_("-mno-altivec disables vsx");
4016 if (msg)
4018 warning (0, msg);
4019 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4020 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4024 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4025 the -mcpu setting to enable options that conflict. */
4026 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4027 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4028 | OPTION_MASK_ALTIVEC
4029 | OPTION_MASK_VSX)) != 0)
4030 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4031 | OPTION_MASK_DIRECT_MOVE)
4032 & ~rs6000_isa_flags_explicit);
4034 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4035 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4037 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4038 unless the user explicitly used the -mno-<option> to disable the code. */
4039 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4040 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0 || TARGET_P9_MINMAX)
4041 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4042 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4043 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4044 else if (TARGET_VSX)
4045 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4046 else if (TARGET_POPCNTD)
4047 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4048 else if (TARGET_DFP)
4049 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4050 else if (TARGET_CMPB)
4051 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4052 else if (TARGET_FPRND)
4053 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
4054 else if (TARGET_POPCNTB)
4055 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
4056 else if (TARGET_ALTIVEC)
4057 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
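  /* Because each mask above is ANDed with ~rs6000_isa_flags_explicit, an
     option the user set explicitly always wins; e.g. with -mno-vsx the VSX
     bit stays clear even though ISA_2_7_MASKS_SERVER contains
     OPTION_MASK_VSX.  */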
4059 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4061 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4062 error ("-mcrypto requires -maltivec");
4063 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4066 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4068 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4069 error ("-mdirect-move requires -mvsx");
4070 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4073 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4075 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4076 error ("-mpower8-vector requires -maltivec");
4077 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4080 if (TARGET_P8_VECTOR && !TARGET_VSX)
4082 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4083 error ("-mpower8-vector requires -mvsx");
4084 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4087 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4089 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4090 error ("-mvsx-timode requires -mvsx");
4091 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4094 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4096 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4097 error ("-mhard-dfp requires -mhard-float");
4098 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4101 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4102 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4103 set the individual option. */
4104 if (TARGET_UPPER_REGS > 0)
4106 if (TARGET_VSX
4107 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4109 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4110 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4112 if (TARGET_VSX
4113 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4115 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4116 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4118 if (TARGET_P8_VECTOR
4119 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4121 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4122 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4125 else if (TARGET_UPPER_REGS == 0)
4127 if (TARGET_VSX
4128 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4130 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4131 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4133 if (TARGET_VSX
4134 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4136 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4137 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4139 if (TARGET_P8_VECTOR
4140 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4142 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4143 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4147 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4149 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4150 error ("-mupper-regs-df requires -mvsx");
4151 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4154 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4156 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4157 error ("-mupper-regs-di requires -mvsx");
4158 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4161 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4163 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4164 error ("-mupper-regs-sf requires -mpower8-vector");
4165 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4168 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4169 silently turn off quad memory mode. */
4170 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4172 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4173 warning (0, N_("-mquad-memory requires 64-bit mode"));
4175 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4176 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4178 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4179 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4182 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4183 the words are reversed, but atomic operations can still be done by
4184 swapping the words. */
4185 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4187 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4188 warning (0, N_("-mquad-memory is not available in little endian mode"));
4190 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4193 /* Assume that if the user asked for normal quad memory instructions, they
4194 want the atomic versions as well, unless they explicitly told us not to use
4195 quad word atomic instructions. */
4196 if (TARGET_QUAD_MEMORY
4197 && !TARGET_QUAD_MEMORY_ATOMIC
4198 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4199 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4201 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4202 generating power8 instructions. */
4203 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4204 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4205 & OPTION_MASK_P8_FUSION);
4207 /* Setting additional fusion flags turns on base fusion. */
4208 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4210 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4212 if (TARGET_P8_FUSION_SIGN)
4213 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4215 if (TARGET_TOC_FUSION)
4216 error ("-mtoc-fusion requires -mpower8-fusion");
4218 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4220 else
4221 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4224 /* Power9 fusion is a superset of power8 fusion. */
4225 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4227 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4229 error ("-mpower9-fusion requires -mpower8-fusion");
4230 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4232 else
4233 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4236 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4237 generating power9 instructions. */
4238 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4239 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4240 & OPTION_MASK_P9_FUSION);
4242 /* Power8 does not fuse sign-extended loads with the addis. If we are
4243 optimizing at high levels for speed, convert a sign-extended load into a
4244 zero-extending load plus an explicit sign extension. */
4245 if (TARGET_P8_FUSION
4246 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4247 && optimize_function_for_speed_p (cfun)
4248 && optimize >= 3)
4249 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4251 /* TOC fusion requires 64-bit and medium/large code model. */
4252 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4254 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4255 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4256 warning (0, N_("-mtoc-fusion requires 64-bit"));
4259 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4261 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4262 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4263 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4266 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4267 model. */
4268 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4269 && (TARGET_CMODEL != CMODEL_SMALL)
4270 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4271 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4273 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4274 -mpower9-dform-vector. */
4275 if (TARGET_P9_DFORM_BOTH > 0)
4277 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4278 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4280 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4281 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4283 else if (TARGET_P9_DFORM_BOTH == 0)
4285 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4286 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4288 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4289 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4292 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4293 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4295 if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4296 error ("-mpower9-dform requires -mpower9-vector");
4297 rs6000_isa_flags &= ~(OPTION_MASK_P9_DFORM_SCALAR
4298 | OPTION_MASK_P9_DFORM_VECTOR);
4301 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4303 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4304 error ("-mpower9-dform requires -mupper-regs-df");
4305 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4308 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4310 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4311 error ("-mpower9-dform requires -mupper-regs-sf");
4312 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4315 /* ISA 3.0 vector instructions include ISA 2.07. */
4316 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4318 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4319 error ("-mpower9-vector requires -mpower8-vector");
4320 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4323 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4324 but do show up with -mno-lra. Given that -mlra will become the default once
4325 PR 69847 is fixed, turn off the options with problems by default if
4326 -mno-lra was used, and warn if the user explicitly asked for the option.
4328 Enable -mpower9-dform-vector by default if LRA and other power9 options.
4329 Enable -mvsx-timode by default if LRA and VSX. */
4330 if (!TARGET_LRA)
4332 if (TARGET_VSX_TIMODE)
4334 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4335 warning (0, "-mvsx-timode might need -mlra");
4337 else
4338 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4342 else
4344 if (TARGET_VSX && !TARGET_VSX_TIMODE
4345 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4346 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4349 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4350 support. If we only have ISA 2.06 support, and the user did not specify
4351 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4352 but we don't enable the full vectorization support. */
4353 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4354 TARGET_ALLOW_MOVMISALIGN = 1;
4356 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4358 if (TARGET_ALLOW_MOVMISALIGN > 0
4359 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4360 error ("-mallow-movmisalign requires -mvsx");
4362 TARGET_ALLOW_MOVMISALIGN = 0;
4365 /* Determine when unaligned vector accesses are permitted, and when
4366 they are preferred over masked Altivec loads. Note that if
4367 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4368 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4369 not true. */
4370 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4372 if (!TARGET_VSX)
4374 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4375 error ("-mefficient-unaligned-vsx requires -mvsx");
4377 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4380 else if (!TARGET_ALLOW_MOVMISALIGN)
4382 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4383 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4385 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4389 /* __float128 requires VSX support. */
4390 if (TARGET_FLOAT128 && !TARGET_VSX)
4392 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) != 0)
4393 error ("-mfloat128 requires VSX support");
4395 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128 | OPTION_MASK_FLOAT128_HW);
4398 /* IEEE 128-bit floating point hardware instructions imply enabling
4399 __float128. */
4400 if (TARGET_FLOAT128_HW
4401 && (rs6000_isa_flags & (OPTION_MASK_P9_VECTOR
4402 | OPTION_MASK_DIRECT_MOVE
4403 | OPTION_MASK_UPPER_REGS_DI
4404 | OPTION_MASK_UPPER_REGS_DF
4405 | OPTION_MASK_UPPER_REGS_SF)) == 0)
4407 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4408 error ("-mfloat128-hardware requires full ISA 3.0 support");
4410 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4413 else if (TARGET_P9_VECTOR && !TARGET_FLOAT128_HW
4414 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) == 0)
4415 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4417 if (TARGET_FLOAT128_HW
4418 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128) == 0)
4419 rs6000_isa_flags |= OPTION_MASK_FLOAT128;
4421 /* Print the options after updating the defaults. */
4422 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4423 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4425 /* E500mc does "better" if we inline more aggressively. Respect the
4426 user's opinion, though. */
4427 if (rs6000_block_move_inline_limit == 0
4428 && (rs6000_cpu == PROCESSOR_PPCE500MC
4429 || rs6000_cpu == PROCESSOR_PPCE500MC64
4430 || rs6000_cpu == PROCESSOR_PPCE5500
4431 || rs6000_cpu == PROCESSOR_PPCE6500))
4432 rs6000_block_move_inline_limit = 128;
4434 /* store_one_arg depends on expand_block_move to handle at least the
4435 size of reg_parm_stack_space. */
4436 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4437 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4439 if (global_init_p)
4441 /* If the appropriate debug option is enabled, replace the target hooks
4442 with debug versions that call the real version and then print
4443 debugging information. */
4444 if (TARGET_DEBUG_COST)
4446 targetm.rtx_costs = rs6000_debug_rtx_costs;
4447 targetm.address_cost = rs6000_debug_address_cost;
4448 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4451 if (TARGET_DEBUG_ADDR)
4453 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4454 targetm.legitimize_address = rs6000_debug_legitimize_address;
4455 rs6000_secondary_reload_class_ptr
4456 = rs6000_debug_secondary_reload_class;
4457 rs6000_secondary_memory_needed_ptr
4458 = rs6000_debug_secondary_memory_needed;
4459 rs6000_cannot_change_mode_class_ptr
4460 = rs6000_debug_cannot_change_mode_class;
4461 rs6000_preferred_reload_class_ptr
4462 = rs6000_debug_preferred_reload_class;
4463 rs6000_legitimize_reload_address_ptr
4464 = rs6000_debug_legitimize_reload_address;
4465 rs6000_mode_dependent_address_ptr
4466 = rs6000_debug_mode_dependent_address;
4469 if (rs6000_veclibabi_name)
4471 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4472 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4473 else
4475 error ("unknown vectorization library ABI type (%s) for "
4476 "-mveclibabi= switch", rs6000_veclibabi_name);
4477 ret = false;
4482 if (!global_options_set.x_rs6000_long_double_type_size)
4484 if (main_target_opt != NULL
4485 && (main_target_opt->x_rs6000_long_double_type_size
4486 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4487 error ("target attribute or pragma changes long double size");
4488 else
4489 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4492 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4493 if (!global_options_set.x_rs6000_ieeequad)
4494 rs6000_ieeequad = 1;
4495 #endif
4497 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4498 target attribute or pragma which automatically enables both options,
4499 unless the altivec ABI was set. This is set by default for 64-bit, but
4500 not for 32-bit. */
4501 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4502 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4503 | OPTION_MASK_FLOAT128)
4504 & ~rs6000_isa_flags_explicit);
4506 /* Enable Altivec ABI for AIX -maltivec. */
4507 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4509 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4510 error ("target attribute or pragma changes AltiVec ABI");
4511 else
4512 rs6000_altivec_abi = 1;
4515 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4516 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4517 be explicitly overridden in either case. */
4518 if (TARGET_ELF)
4520 if (!global_options_set.x_rs6000_altivec_abi
4521 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4523 if (main_target_opt != NULL &&
4524 !main_target_opt->x_rs6000_altivec_abi)
4525 error ("target attribute or pragma changes AltiVec ABI");
4526 else
4527 rs6000_altivec_abi = 1;
4531 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4532 So far, the only darwin64 targets are also MACH-O. */
4533 if (TARGET_MACHO
4534 && DEFAULT_ABI == ABI_DARWIN
4535 && TARGET_64BIT)
4537 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4538 error ("target attribute or pragma changes darwin64 ABI");
4539 else
4541 rs6000_darwin64_abi = 1;
4542 /* Default to natural alignment, for better performance. */
4543 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4547 /* Place FP constants in the constant pool instead of the TOC
4548 if section anchors are enabled. */
4549 if (flag_section_anchors
4550 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4551 TARGET_NO_FP_IN_TOC = 1;
4553 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4554 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4556 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4557 SUBTARGET_OVERRIDE_OPTIONS;
4558 #endif
4559 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4560 SUBSUBTARGET_OVERRIDE_OPTIONS;
4561 #endif
4562 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4563 SUB3TARGET_OVERRIDE_OPTIONS;
4564 #endif
4566 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4567 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4569 /* For the E500 family of cores, reset the single/double FP flags to let us
4570 check that they remain constant across attributes or pragmas. Also
4571 clear a possible request for string instructions, which are not supported
4572 and which we might have silently enabled above for -Os.
4574 For other families, clear ISEL in case it was set implicitly.
4577 switch (rs6000_cpu)
4579 case PROCESSOR_PPC8540:
4580 case PROCESSOR_PPC8548:
4581 case PROCESSOR_PPCE500MC:
4582 case PROCESSOR_PPCE500MC64:
4583 case PROCESSOR_PPCE5500:
4584 case PROCESSOR_PPCE6500:
4586 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
4587 rs6000_double_float = TARGET_E500_DOUBLE;
4589 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4591 break;
4593 default:
4595 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4596 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4598 break;
4601 if (main_target_opt)
4603 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4604 error ("target attribute or pragma changes single precision floating "
4605 "point");
4606 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4607 error ("target attribute or pragma changes double precision floating "
4608 "point");
4611 /* Detect invalid option combinations with E500. */
4612 CHECK_E500_OPTIONS;
4614 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4615 && rs6000_cpu != PROCESSOR_POWER5
4616 && rs6000_cpu != PROCESSOR_POWER6
4617 && rs6000_cpu != PROCESSOR_POWER7
4618 && rs6000_cpu != PROCESSOR_POWER8
4619 && rs6000_cpu != PROCESSOR_POWER9
4620 && rs6000_cpu != PROCESSOR_PPCA2
4621 && rs6000_cpu != PROCESSOR_CELL
4622 && rs6000_cpu != PROCESSOR_PPC476);
4623 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4624 || rs6000_cpu == PROCESSOR_POWER5
4625 || rs6000_cpu == PROCESSOR_POWER7
4626 || rs6000_cpu == PROCESSOR_POWER8
4627 || rs6000_cpu == PROCESSOR_POWER9);
4628 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4629 || rs6000_cpu == PROCESSOR_POWER5
4630 || rs6000_cpu == PROCESSOR_POWER6
4631 || rs6000_cpu == PROCESSOR_POWER7
4632 || rs6000_cpu == PROCESSOR_POWER8
4633 || rs6000_cpu == PROCESSOR_POWER9
4634 || rs6000_cpu == PROCESSOR_PPCE500MC
4635 || rs6000_cpu == PROCESSOR_PPCE500MC64
4636 || rs6000_cpu == PROCESSOR_PPCE5500
4637 || rs6000_cpu == PROCESSOR_PPCE6500);
4639 /* Allow debug switches to override the above settings. These are set to -1
4640 in rs6000.opt to indicate the user hasn't directly set the switch. */
4641 if (TARGET_ALWAYS_HINT >= 0)
4642 rs6000_always_hint = TARGET_ALWAYS_HINT;
4644 if (TARGET_SCHED_GROUPS >= 0)
4645 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4647 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4648 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4650 rs6000_sched_restricted_insns_priority
4651 = (rs6000_sched_groups ? 1 : 0);
4653 /* Handle -msched-costly-dep option. */
4654 rs6000_sched_costly_dep
4655 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4657 if (rs6000_sched_costly_dep_str)
4659 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4660 rs6000_sched_costly_dep = no_dep_costly;
4661 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4662 rs6000_sched_costly_dep = all_deps_costly;
4663 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4664 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4665 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4666 rs6000_sched_costly_dep = store_to_load_dep_costly;
4667 else
4668 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4669 atoi (rs6000_sched_costly_dep_str));
4672 /* Handle -minsert-sched-nops option. */
4673 rs6000_sched_insert_nops
4674 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4676 if (rs6000_sched_insert_nops_str)
4678 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4679 rs6000_sched_insert_nops = sched_finish_none;
4680 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4681 rs6000_sched_insert_nops = sched_finish_pad_groups;
4682 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4683 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4684 else
4685 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4686 atoi (rs6000_sched_insert_nops_str));
4689 if (global_init_p)
4691 #ifdef TARGET_REGNAMES
4692 /* If the user desires alternate register names, copy in the
4693 alternate names now. */
4694 if (TARGET_REGNAMES)
4695 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4696 #endif
4698 /* Set aix_struct_return last, after the ABI is determined.
4699 If -maix-struct-return or -msvr4-struct-return was explicitly
4700 used, don't override with the ABI default. */
4701 if (!global_options_set.x_aix_struct_return)
4702 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4704 #if 0
4705 /* IBM XL compiler defaults to unsigned bitfields. */
4706 if (TARGET_XL_COMPAT)
4707 flag_signed_bitfields = 0;
4708 #endif
4710 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4711 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
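      /* That is, a 128-bit long double that is not IEEE uses the IBM
         extended (double-double) format: a pair of doubles whose sum
         represents the value.  */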
4713 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4715 /* We can only guarantee the availability of DI pseudo-ops when
4716 assembling for 64-bit targets. */
4717 if (!TARGET_64BIT)
4719 targetm.asm_out.aligned_op.di = NULL;
4720 targetm.asm_out.unaligned_op.di = NULL;
4724 /* Set branch target alignment, if not optimizing for size. */
4725 if (!optimize_size)
4727 /* Cell wants 8-byte alignment for dual issue. Titan wants 8-byte
4728 alignment to avoid misprediction by the branch predictor. */
4729 if (rs6000_cpu == PROCESSOR_TITAN
4730 || rs6000_cpu == PROCESSOR_CELL)
4732 if (align_functions <= 0)
4733 align_functions = 8;
4734 if (align_jumps <= 0)
4735 align_jumps = 8;
4736 if (align_loops <= 0)
4737 align_loops = 8;
4739 if (rs6000_align_branch_targets)
4741 if (align_functions <= 0)
4742 align_functions = 16;
4743 if (align_jumps <= 0)
4744 align_jumps = 16;
4745 if (align_loops <= 0)
4747 can_override_loop_align = 1;
4748 align_loops = 16;
4751 if (align_jumps_max_skip <= 0)
4752 align_jumps_max_skip = 15;
4753 if (align_loops_max_skip <= 0)
4754 align_loops_max_skip = 15;
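      /* The max-skip values bound the padding the assembler may insert;
         e.g. a 16-byte alignment with max skip 15 typically emits
         ".p2align 4,,15", padding only when 15 or fewer bytes suffice.  */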
4757 /* Arrange to save and restore machine status around nested functions. */
4758 init_machine_status = rs6000_init_machine_status;
4760 /* We should always be splitting complex arguments, but we can't break
4761 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4762 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4763 targetm.calls.split_complex_arg = NULL;
4766 /* Initialize rs6000_cost with the appropriate target costs. */
4767 if (optimize_size)
4768 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4769 else
4770 switch (rs6000_cpu)
4772 case PROCESSOR_RS64A:
4773 rs6000_cost = &rs64a_cost;
4774 break;
4776 case PROCESSOR_MPCCORE:
4777 rs6000_cost = &mpccore_cost;
4778 break;
4780 case PROCESSOR_PPC403:
4781 rs6000_cost = &ppc403_cost;
4782 break;
4784 case PROCESSOR_PPC405:
4785 rs6000_cost = &ppc405_cost;
4786 break;
4788 case PROCESSOR_PPC440:
4789 rs6000_cost = &ppc440_cost;
4790 break;
4792 case PROCESSOR_PPC476:
4793 rs6000_cost = &ppc476_cost;
4794 break;
4796 case PROCESSOR_PPC601:
4797 rs6000_cost = &ppc601_cost;
4798 break;
4800 case PROCESSOR_PPC603:
4801 rs6000_cost = &ppc603_cost;
4802 break;
4804 case PROCESSOR_PPC604:
4805 rs6000_cost = &ppc604_cost;
4806 break;
4808 case PROCESSOR_PPC604e:
4809 rs6000_cost = &ppc604e_cost;
4810 break;
4812 case PROCESSOR_PPC620:
4813 rs6000_cost = &ppc620_cost;
4814 break;
4816 case PROCESSOR_PPC630:
4817 rs6000_cost = &ppc630_cost;
4818 break;
4820 case PROCESSOR_CELL:
4821 rs6000_cost = &ppccell_cost;
4822 break;
4824 case PROCESSOR_PPC750:
4825 case PROCESSOR_PPC7400:
4826 rs6000_cost = &ppc750_cost;
4827 break;
4829 case PROCESSOR_PPC7450:
4830 rs6000_cost = &ppc7450_cost;
4831 break;
4833 case PROCESSOR_PPC8540:
4834 case PROCESSOR_PPC8548:
4835 rs6000_cost = &ppc8540_cost;
4836 break;
4838 case PROCESSOR_PPCE300C2:
4839 case PROCESSOR_PPCE300C3:
4840 rs6000_cost = &ppce300c2c3_cost;
4841 break;
4843 case PROCESSOR_PPCE500MC:
4844 rs6000_cost = &ppce500mc_cost;
4845 break;
4847 case PROCESSOR_PPCE500MC64:
4848 rs6000_cost = &ppce500mc64_cost;
4849 break;
4851 case PROCESSOR_PPCE5500:
4852 rs6000_cost = &ppce5500_cost;
4853 break;
4855 case PROCESSOR_PPCE6500:
4856 rs6000_cost = &ppce6500_cost;
4857 break;
4859 case PROCESSOR_TITAN:
4860 rs6000_cost = &titan_cost;
4861 break;
4863 case PROCESSOR_POWER4:
4864 case PROCESSOR_POWER5:
4865 rs6000_cost = &power4_cost;
4866 break;
4868 case PROCESSOR_POWER6:
4869 rs6000_cost = &power6_cost;
4870 break;
4872 case PROCESSOR_POWER7:
4873 rs6000_cost = &power7_cost;
4874 break;
4876 case PROCESSOR_POWER8:
4877 rs6000_cost = &power8_cost;
4878 break;
4880 case PROCESSOR_POWER9:
4881 rs6000_cost = &power9_cost;
4882 break;
4884 case PROCESSOR_PPCA2:
4885 rs6000_cost = &ppca2_cost;
4886 break;
4888 default:
4889 gcc_unreachable ();
4892 if (global_init_p)
4894 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4895 rs6000_cost->simultaneous_prefetches,
4896 global_options.x_param_values,
4897 global_options_set.x_param_values);
4898 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4899 global_options.x_param_values,
4900 global_options_set.x_param_values);
4901 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4902 rs6000_cost->cache_line_size,
4903 global_options.x_param_values,
4904 global_options_set.x_param_values);
4905 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4906 global_options.x_param_values,
4907 global_options_set.x_param_values);
4909 /* Increase loop peeling limits based on performance analysis. */
4910 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4911 global_options.x_param_values,
4912 global_options_set.x_param_values);
4913 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4914 global_options.x_param_values,
4915 global_options_set.x_param_values);
4917 /* If using typedef char *va_list, signal that
4918 __builtin_va_start (&ap, 0) can be optimized to
4919 ap = __builtin_next_arg (0). */
4920 if (DEFAULT_ABI != ABI_V4)
4921 targetm.expand_builtin_va_start = NULL;
4924 /* Set up single/double float flags.
4925 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
4926 then set both flags. */
4927 if (TARGET_HARD_FLOAT && TARGET_FPRS
4928 && rs6000_single_float == 0 && rs6000_double_float == 0)
4929 rs6000_single_float = rs6000_double_float = 1;
4931 /* If not explicitly specified via option, decide whether to generate indexed
4932 load/store instructions. */
4933 if (TARGET_AVOID_XFORM == -1)
4934 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4935 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4936 need indexed accesses and the type used is the scalar type of the element
4937 being loaded or stored. */
4938 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4939 && !TARGET_ALTIVEC);
4941 /* Set the -mrecip options. */
4942 if (rs6000_recip_name)
4944 char *p = ASTRDUP (rs6000_recip_name);
4945 char *q;
4946 unsigned int mask, i;
4947 bool invert;
4949 while ((q = strtok (p, ",")) != NULL)
4951 p = NULL;
4952 if (*q == '!')
4954 invert = true;
4955 q++;
4957 else
4958 invert = false;
4960 if (!strcmp (q, "default"))
4961 mask = ((TARGET_RECIP_PRECISION)
4962 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4963 else
4965 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4966 if (!strcmp (q, recip_options[i].string))
4968 mask = recip_options[i].mask;
4969 break;
4972 if (i == ARRAY_SIZE (recip_options))
4974 error ("unknown option for -mrecip=%s", q);
4975 invert = false;
4976 mask = 0;
4977 ret = false;
4981 if (invert)
4982 rs6000_recip_control &= ~mask;
4983 else
4984 rs6000_recip_control |= mask;
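  /* Example of the syntax parsed above: -mrecip=all,!rsqrtd enables every
     reciprocal estimate except the double-precision reciprocal square root,
     and "default" selects the high- or low-precision set according to
     TARGET_RECIP_PRECISION.  */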
4988 /* Set the builtin mask for the various options used that could affect
4989 which builtins are enabled. In the past we used target_flags, but we've run out
4990 of bits, and some options like SPE and PAIRED are no longer in
4991 target_flags. */
4992 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4993 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4994 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4995 rs6000_builtin_mask);
4997 /* Initialize all of the registers. */
4998 rs6000_init_hard_regno_mode_ok (global_init_p);
5000 /* Save the initial options in case the user uses function-specific options. */
5001 if (global_init_p)
5002 target_option_default_node = target_option_current_node
5003 = build_target_option_node (&global_options);
5005 /* If not explicitly specified via option, decide whether to generate the
5006 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5007 if (TARGET_LINK_STACK == -1)
5008 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5010 return ret;
5013 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5014 define the target cpu type. */
5016 static void
5017 rs6000_option_override (void)
5019 (void) rs6000_option_override_internal (true);
5021 /* Register machine-specific passes. This needs to be done at start-up.
5022 It's convenient to do it here (like i386 does). */
5023 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
5025 struct register_pass_info analyze_swaps_info
5026 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
5028 register_pass (&analyze_swaps_info);
5032 /* Implement targetm.vectorize.builtin_mask_for_load. */
5033 static tree
5034 rs6000_builtin_mask_for_load (void)
5036 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5037 if ((TARGET_ALTIVEC && !TARGET_VSX)
5038 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5039 return altivec_builtin_mask_for_load;
5040 else
5041 return 0;
5044 /* Implement LOOP_ALIGN. */
5045 int
5046 rs6000_loop_align (rtx label)
5048 basic_block bb;
5049 int ninsns;
5051 /* Don't override loop alignment if -falign-loops was specified. */
5052 if (!can_override_loop_align)
5053 return align_loops_log;
5055 bb = BLOCK_FOR_INSN (label);
5056 ninsns = num_loop_insns (bb->loop_father);
5058 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5059 if (ninsns > 4 && ninsns <= 8
5060 && (rs6000_cpu == PROCESSOR_POWER4
5061 || rs6000_cpu == PROCESSOR_POWER5
5062 || rs6000_cpu == PROCESSOR_POWER6
5063 || rs6000_cpu == PROCESSOR_POWER7
5064 || rs6000_cpu == PROCESSOR_POWER8
5065 || rs6000_cpu == PROCESSOR_POWER9))
5066 return 5;
5067 else
5068 return align_loops_log;
5071 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5072 static int
5073 rs6000_loop_align_max_skip (rtx_insn *label)
5075 return (1 << rs6000_loop_align (label)) - 1;
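/* How the two alignment hooks above combine (illustrative numbers):
   on a POWER8 target, a loop of 6 instructions satisfies the 4 <
   ninsns <= 8 test, so rs6000_loop_align returns 5, i.e. a 2**5 =
   32-byte boundary, and rs6000_loop_align_max_skip then permits up to
   (1 << 5) - 1 = 31 bytes of padding to reach it.  */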
5078 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5079 after applying N iterations.  This routine does not determine
5080 how many iterations are required to reach the desired alignment.  */
5082 static bool
5083 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5085 if (is_packed)
5086 return false;
5088 if (TARGET_32BIT)
5090 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5091 return true;
5093 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5094 return true;
5096 return false;
5098 else
5100 if (TARGET_MACHO)
5101 return false;
5103 /* Assume that all other types are naturally aligned.  CHECKME!  */
5104 return true;
5108 /* Return true if the vector misalignment factor is supported by the
5109 target. */
5110 static bool
5111 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5112 const_tree type,
5113 int misalignment,
5114 bool is_packed)
5116 if (TARGET_VSX)
5118 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5119 return true;
5121 /* Return false if the movmisalign pattern is not supported for this mode.  */
5122 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5123 return false;
5125 if (misalignment == -1)
5127 /* Misalignment factor is unknown at compile time but we know
5128 it's word aligned. */
5129 if (rs6000_vector_alignment_reachable (type, is_packed))
5131 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5133 if (element_size == 64 || element_size == 32)
5134 return true;
5137 return false;
5140 /* VSX supports word-aligned vectors.  */
5141 if (misalignment % 4 == 0)
5142 return true;
5144 return false;
5147 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5148 static int
5149 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5150 tree vectype, int misalign)
5152 unsigned elements;
5153 tree elem_type;
5155 switch (type_of_cost)
5157 case scalar_stmt:
5158 case scalar_load:
5159 case scalar_store:
5160 case vector_stmt:
5161 case vector_load:
5162 case vector_store:
5163 case vec_to_scalar:
5164 case scalar_to_vec:
5165 case cond_branch_not_taken:
5166 return 1;
5168 case vec_perm:
5169 if (TARGET_VSX)
5170 return 3;
5171 else
5172 return 1;
5174 case vec_promote_demote:
5175 if (TARGET_VSX)
5176 return 4;
5177 else
5178 return 1;
5180 case cond_branch_taken:
5181 return 3;
5183 case unaligned_load:
5184 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5185 return 1;
5187 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5189 elements = TYPE_VECTOR_SUBPARTS (vectype);
5190 if (elements == 2)
5191 /* Double word aligned. */
5192 return 2;
5194 if (elements == 4)
5196 switch (misalign)
5198 case 8:
5199 /* Double word aligned. */
5200 return 2;
5202 case -1:
5203 /* Unknown misalignment. */
5204 case 4:
5205 case 12:
5206 /* Word aligned. */
5207 return 22;
5209 default:
5210 gcc_unreachable ();
5215 if (TARGET_ALTIVEC)
5216 /* Misaligned loads are not supported. */
5217 gcc_unreachable ();
5219 return 2;
5221 case unaligned_store:
5222 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5223 return 1;
5225 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5227 elements = TYPE_VECTOR_SUBPARTS (vectype);
5228 if (elements == 2)
5229 /* Double word aligned. */
5230 return 2;
5232 if (elements == 4)
5234 switch (misalign)
5236 case 8:
5237 /* Double word aligned. */
5238 return 2;
5240 case -1:
5241 /* Unknown misalignment. */
5242 case 4:
5243 case 12:
5244 /* Word aligned. */
5245 return 23;
5247 default:
5248 gcc_unreachable ();
5253 if (TARGET_ALTIVEC)
5254 /* Misaligned stores are not supported. */
5255 gcc_unreachable ();
5257 return 2;
5259 case vec_construct:
5260 elements = TYPE_VECTOR_SUBPARTS (vectype);
5261 elem_type = TREE_TYPE (vectype);
5262 /* 32-bit vectors loaded into registers are stored as double
5263 precision, so we need n/2 converts in addition to the usual
5264 n/2 merges to construct a vector of short floats from them. */
5265 if (SCALAR_FLOAT_TYPE_P (elem_type)
5266 && TYPE_PRECISION (elem_type) == 32)
5267 return elements + 1;
5268 else
5269 return elements / 2 + 1;
5271 default:
5272 gcc_unreachable ();
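/* Worked example of the cost table above (numbers read straight off
   the switch): on a VSX target without TARGET_EFFICIENT_UNALIGNED_VSX
   but with TARGET_ALLOW_MOVMISALIGN, an unaligned V4SF load that is
   only word aligned (misalign 4 or 12) costs 22, while the same load
   double-word aligned (misalign 8) costs just 2 -- a gap that pushes
   the vectorizer toward peeling or versioning for alignment.  */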
5276 /* Implement targetm.vectorize.preferred_simd_mode. */
5278 static machine_mode
5279 rs6000_preferred_simd_mode (machine_mode mode)
5281 if (TARGET_VSX)
5282 switch (mode)
5284 case DFmode:
5285 return V2DFmode;
5286 default:;
5288 if (TARGET_ALTIVEC || TARGET_VSX)
5289 switch (mode)
5291 case SFmode:
5292 return V4SFmode;
5293 case TImode:
5294 return V1TImode;
5295 case DImode:
5296 return V2DImode;
5297 case SImode:
5298 return V4SImode;
5299 case HImode:
5300 return V8HImode;
5301 case QImode:
5302 return V16QImode;
5303 default:;
5305 if (TARGET_SPE)
5306 switch (mode)
5308 case SFmode:
5309 return V2SFmode;
5310 case SImode:
5311 return V2SImode;
5312 default:;
5314 if (TARGET_PAIRED_FLOAT
5315 && mode == SFmode)
5316 return V2SFmode;
5317 return word_mode;
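/* The mode mapping above, summarized (assuming a VSX target): DFmode
   -> V2DFmode, SFmode -> V4SFmode, TImode -> V1TImode, DImode ->
   V2DImode, SImode -> V4SImode, HImode -> V8HImode, QImode ->
   V16QImode.  Anything else yields word_mode, which tells the
   vectorizer there is no preferred vector mode for that scalar.  */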
5320 typedef struct _rs6000_cost_data
5322 struct loop *loop_info;
5323 unsigned cost[3];
5324 } rs6000_cost_data;
5326 /* Test for likely overcommitment of vector hardware resources. If a
5327 loop iteration is relatively large, and too large a percentage of
5328 instructions in the loop are vectorized, the cost model may not
5329 adequately reflect delays from unavailable vector resources.
5330 Penalize the loop body cost for this case. */
5332 static void
5333 rs6000_density_test (rs6000_cost_data *data)
5335 const int DENSITY_PCT_THRESHOLD = 85;
5336 const int DENSITY_SIZE_THRESHOLD = 70;
5337 const int DENSITY_PENALTY = 10;
5338 struct loop *loop = data->loop_info;
5339 basic_block *bbs = get_loop_body (loop);
5340 int nbbs = loop->num_nodes;
5341 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5342 int i, density_pct;
5344 for (i = 0; i < nbbs; i++)
5346 basic_block bb = bbs[i];
5347 gimple_stmt_iterator gsi;
5349 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5351 gimple *stmt = gsi_stmt (gsi);
5352 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5354 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5355 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5356 not_vec_cost++;
5360 free (bbs);
5361 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5363 if (density_pct > DENSITY_PCT_THRESHOLD
5364 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5366 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5367 if (dump_enabled_p ())
5368 dump_printf_loc (MSG_NOTE, vect_location,
5369 "density %d%%, cost %d exceeds threshold, penalizing "
5370 "loop body cost by %d%%", density_pct,
5371 vec_cost + not_vec_cost, DENSITY_PENALTY);
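/* Numeric sketch of the density heuristic (made-up counts): with
   vec_cost == 90 and not_vec_cost == 10, density_pct is 90, which
   exceeds DENSITY_PCT_THRESHOLD, and the loop size 100 exceeds
   DENSITY_SIZE_THRESHOLD, so the body cost is rescaled to
   90 * 110 / 100 == 99 and the note above is dumped.  */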
5375 /* Implement targetm.vectorize.init_cost. */
5377 static void *
5378 rs6000_init_cost (struct loop *loop_info)
5380 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5381 data->loop_info = loop_info;
5382 data->cost[vect_prologue] = 0;
5383 data->cost[vect_body] = 0;
5384 data->cost[vect_epilogue] = 0;
5385 return data;
5388 /* Implement targetm.vectorize.add_stmt_cost. */
5390 static unsigned
5391 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5392 struct _stmt_vec_info *stmt_info, int misalign,
5393 enum vect_cost_model_location where)
5395 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5396 unsigned retval = 0;
5398 if (flag_vect_cost_model)
5400 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5401 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5402 misalign);
5403 /* Statements in an inner loop relative to the loop being
5404 vectorized are weighted more heavily. The value here is
5405 arbitrary and could potentially be improved with analysis. */
5406 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5407 count *= 50; /* FIXME. */
5409 retval = (unsigned) (count * stmt_cost);
5410 cost_data->cost[where] += retval;
5413 return retval;
5416 /* Implement targetm.vectorize.finish_cost. */
5418 static void
5419 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5420 unsigned *body_cost, unsigned *epilogue_cost)
5422 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5424 if (cost_data->loop_info)
5425 rs6000_density_test (cost_data);
5427 *prologue_cost = cost_data->cost[vect_prologue];
5428 *body_cost = cost_data->cost[vect_body];
5429 *epilogue_cost = cost_data->cost[vect_epilogue];
5432 /* Implement targetm.vectorize.destroy_cost_data. */
5434 static void
5435 rs6000_destroy_cost_data (void *data)
5437 free (data);
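/* Sketch of the lifecycle the vectorizer drives through the four cost
   hooks above (call order only; the real arguments come from the
   vectorizer, and the names below are placeholders):

     void *d = rs6000_init_cost (loop);
     rs6000_add_stmt_cost (d, 1, vector_load, stmt_info, -1, vect_body);
     rs6000_finish_cost (d, &prologue, &body, &epilogue);
     rs6000_destroy_cost_data (d);
*/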
5440 /* Handler for the Mathematical Acceleration Subsystem (MASS) interface to a
5441 library with vectorized intrinsics. */
5443 static tree
5444 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5445 tree type_in)
5447 char name[32];
5448 const char *suffix = NULL;
5449 tree fntype, new_fndecl, bdecl = NULL_TREE;
5450 int n_args = 1;
5451 const char *bname;
5452 machine_mode el_mode, in_mode;
5453 int n, in_n;
5455 /* Libmass is suitable only for unsafe math, since it does not correctly
5456 support parts of IEEE (such as denormals) with the required precision.
5457 Only support it if we have VSX to use the simd d2 or f4 functions.
5458 XXX: Add variable length support.  */
5459 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5460 return NULL_TREE;
5462 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5463 n = TYPE_VECTOR_SUBPARTS (type_out);
5464 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5465 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5466 if (el_mode != in_mode
5467 || n != in_n)
5468 return NULL_TREE;
5470 switch (fn)
5472 CASE_CFN_ATAN2:
5473 CASE_CFN_HYPOT:
5474 CASE_CFN_POW:
5475 n_args = 2;
5476 /* fall through */
5478 CASE_CFN_ACOS:
5479 CASE_CFN_ACOSH:
5480 CASE_CFN_ASIN:
5481 CASE_CFN_ASINH:
5482 CASE_CFN_ATAN:
5483 CASE_CFN_ATANH:
5484 CASE_CFN_CBRT:
5485 CASE_CFN_COS:
5486 CASE_CFN_COSH:
5487 CASE_CFN_ERF:
5488 CASE_CFN_ERFC:
5489 CASE_CFN_EXP2:
5490 CASE_CFN_EXP:
5491 CASE_CFN_EXPM1:
5492 CASE_CFN_LGAMMA:
5493 CASE_CFN_LOG10:
5494 CASE_CFN_LOG1P:
5495 CASE_CFN_LOG2:
5496 CASE_CFN_LOG:
5497 CASE_CFN_SIN:
5498 CASE_CFN_SINH:
5499 CASE_CFN_SQRT:
5500 CASE_CFN_TAN:
5501 CASE_CFN_TANH:
5502 if (el_mode == DFmode && n == 2)
5504 bdecl = mathfn_built_in (double_type_node, fn);
5505 suffix = "d2"; /* pow -> powd2 */
5507 else if (el_mode == SFmode && n == 4)
5509 bdecl = mathfn_built_in (float_type_node, fn);
5510 suffix = "4"; /* powf -> powf4 */
5512 else
5513 return NULL_TREE;
5514 if (!bdecl)
5515 return NULL_TREE;
5516 break;
5518 default:
5519 return NULL_TREE;
5522 gcc_assert (suffix != NULL);
5523 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5524 if (!bname)
5525 return NULL_TREE;
5527 strcpy (name, bname + sizeof ("__builtin_") - 1);
5528 strcat (name, suffix);
5530 if (n_args == 1)
5531 fntype = build_function_type_list (type_out, type_in, NULL);
5532 else if (n_args == 2)
5533 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5534 else
5535 gcc_unreachable ();
5537 /* Build a function declaration for the vectorized function. */
5538 new_fndecl = build_decl (BUILTINS_LOCATION,
5539 FUNCTION_DECL, get_identifier (name), fntype);
5540 TREE_PUBLIC (new_fndecl) = 1;
5541 DECL_EXTERNAL (new_fndecl) = 1;
5542 DECL_IS_NOVOPS (new_fndecl) = 1;
5543 TREE_READONLY (new_fndecl) = 1;
5545 return new_fndecl;
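/* Name-mangling example for the MASS handler above (assuming
   -funsafe-math-optimizations and VSX): a pow vectorized over V2DF
   takes bdecl "__builtin_pow", strips the "__builtin_" prefix, and
   appends "d2" to build an external decl named "powd2"; a powf over
   V4SF likewise becomes "powf4".  */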
5548 /* Return a function decl for a vectorized version of the builtin function
5549 with builtin function code FN and result vector type TYPE_OUT, or NULL_TREE
5550 if it is not available.  */
5552 static tree
5553 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5554 tree type_in)
5556 machine_mode in_mode, out_mode;
5557 int in_n, out_n;
5559 if (TARGET_DEBUG_BUILTIN)
5560 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5561 combined_fn_name (combined_fn (fn)),
5562 GET_MODE_NAME (TYPE_MODE (type_out)),
5563 GET_MODE_NAME (TYPE_MODE (type_in)));
5565 if (TREE_CODE (type_out) != VECTOR_TYPE
5566 || TREE_CODE (type_in) != VECTOR_TYPE
5567 || !TARGET_VECTORIZE_BUILTINS)
5568 return NULL_TREE;
5570 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5571 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5572 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5573 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5575 switch (fn)
5577 CASE_CFN_COPYSIGN:
5578 if (VECTOR_UNIT_VSX_P (V2DFmode)
5579 && out_mode == DFmode && out_n == 2
5580 && in_mode == DFmode && in_n == 2)
5581 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5582 if (VECTOR_UNIT_VSX_P (V4SFmode)
5583 && out_mode == SFmode && out_n == 4
5584 && in_mode == SFmode && in_n == 4)
5585 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5586 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5587 && out_mode == SFmode && out_n == 4
5588 && in_mode == SFmode && in_n == 4)
5589 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5590 break;
5591 CASE_CFN_CEIL:
5592 if (VECTOR_UNIT_VSX_P (V2DFmode)
5593 && out_mode == DFmode && out_n == 2
5594 && in_mode == DFmode && in_n == 2)
5595 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5596 if (VECTOR_UNIT_VSX_P (V4SFmode)
5597 && out_mode == SFmode && out_n == 4
5598 && in_mode == SFmode && in_n == 4)
5599 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5600 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5601 && out_mode == SFmode && out_n == 4
5602 && in_mode == SFmode && in_n == 4)
5603 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5604 break;
5605 CASE_CFN_FLOOR:
5606 if (VECTOR_UNIT_VSX_P (V2DFmode)
5607 && out_mode == DFmode && out_n == 2
5608 && in_mode == DFmode && in_n == 2)
5609 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5610 if (VECTOR_UNIT_VSX_P (V4SFmode)
5611 && out_mode == SFmode && out_n == 4
5612 && in_mode == SFmode && in_n == 4)
5613 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5614 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5615 && out_mode == SFmode && out_n == 4
5616 && in_mode == SFmode && in_n == 4)
5617 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5618 break;
5619 CASE_CFN_FMA:
5620 if (VECTOR_UNIT_VSX_P (V2DFmode)
5621 && out_mode == DFmode && out_n == 2
5622 && in_mode == DFmode && in_n == 2)
5623 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5624 if (VECTOR_UNIT_VSX_P (V4SFmode)
5625 && out_mode == SFmode && out_n == 4
5626 && in_mode == SFmode && in_n == 4)
5627 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5628 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5629 && out_mode == SFmode && out_n == 4
5630 && in_mode == SFmode && in_n == 4)
5631 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5632 break;
5633 CASE_CFN_TRUNC:
5634 if (VECTOR_UNIT_VSX_P (V2DFmode)
5635 && out_mode == DFmode && out_n == 2
5636 && in_mode == DFmode && in_n == 2)
5637 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5638 if (VECTOR_UNIT_VSX_P (V4SFmode)
5639 && out_mode == SFmode && out_n == 4
5640 && in_mode == SFmode && in_n == 4)
5641 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5642 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5643 && out_mode == SFmode && out_n == 4
5644 && in_mode == SFmode && in_n == 4)
5645 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5646 break;
5647 CASE_CFN_NEARBYINT:
5648 if (VECTOR_UNIT_VSX_P (V2DFmode)
5649 && flag_unsafe_math_optimizations
5650 && out_mode == DFmode && out_n == 2
5651 && in_mode == DFmode && in_n == 2)
5652 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5653 if (VECTOR_UNIT_VSX_P (V4SFmode)
5654 && flag_unsafe_math_optimizations
5655 && out_mode == SFmode && out_n == 4
5656 && in_mode == SFmode && in_n == 4)
5657 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5658 break;
5659 CASE_CFN_RINT:
5660 if (VECTOR_UNIT_VSX_P (V2DFmode)
5661 && !flag_trapping_math
5662 && out_mode == DFmode && out_n == 2
5663 && in_mode == DFmode && in_n == 2)
5664 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5665 if (VECTOR_UNIT_VSX_P (V4SFmode)
5666 && !flag_trapping_math
5667 && out_mode == SFmode && out_n == 4
5668 && in_mode == SFmode && in_n == 4)
5669 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5670 break;
5671 default:
5672 break;
5675 /* Generate calls to libmass if appropriate. */
5676 if (rs6000_veclib_handler)
5677 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5679 return NULL_TREE;
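/* Concrete mappings performed by the switch above (assuming VSX): a
   ceil vectorized over V2DF resolves to VSX_BUILTIN_XVRDPIP (the
   xvrdpip instruction), and a copysignf over V4SF resolves to
   VSX_BUILTIN_CPSGNSP.  Anything the switch does not recognize falls
   through to the MASS handler, if one was registered.  */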
5682 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5684 static tree
5685 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5686 tree type_in)
5688 machine_mode in_mode, out_mode;
5689 int in_n, out_n;
5691 if (TARGET_DEBUG_BUILTIN)
5692 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5693 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5694 GET_MODE_NAME (TYPE_MODE (type_out)),
5695 GET_MODE_NAME (TYPE_MODE (type_in)));
5697 if (TREE_CODE (type_out) != VECTOR_TYPE
5698 || TREE_CODE (type_in) != VECTOR_TYPE
5699 || !TARGET_VECTORIZE_BUILTINS)
5700 return NULL_TREE;
5702 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5703 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5704 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5705 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5707 enum rs6000_builtins fn
5708 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5709 switch (fn)
5711 case RS6000_BUILTIN_RSQRTF:
5712 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5713 && out_mode == SFmode && out_n == 4
5714 && in_mode == SFmode && in_n == 4)
5715 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5716 break;
5717 case RS6000_BUILTIN_RSQRT:
5718 if (VECTOR_UNIT_VSX_P (V2DFmode)
5719 && out_mode == DFmode && out_n == 2
5720 && in_mode == DFmode && in_n == 2)
5721 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5722 break;
5723 case RS6000_BUILTIN_RECIPF:
5724 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5725 && out_mode == SFmode && out_n == 4
5726 && in_mode == SFmode && in_n == 4)
5727 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5728 break;
5729 case RS6000_BUILTIN_RECIP:
5730 if (VECTOR_UNIT_VSX_P (V2DFmode)
5731 && out_mode == DFmode && out_n == 2
5732 && in_mode == DFmode && in_n == 2)
5733 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5734 break;
5735 default:
5736 break;
5738 return NULL_TREE;
5741 /* Default CPU string for rs6000*_file_start functions. */
5742 static const char *rs6000_default_cpu;
5744 /* Do anything needed at the start of the asm file. */
5746 static void
5747 rs6000_file_start (void)
5749 char buffer[80];
5750 const char *start = buffer;
5751 FILE *file = asm_out_file;
5753 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5755 default_file_start ();
5757 if (flag_verbose_asm)
5759 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5761 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5763 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5764 start = "";
5767 if (global_options_set.x_rs6000_cpu_index)
5769 fprintf (file, "%s -mcpu=%s", start,
5770 processor_target_table[rs6000_cpu_index].name);
5771 start = "";
5774 if (global_options_set.x_rs6000_tune_index)
5776 fprintf (file, "%s -mtune=%s", start,
5777 processor_target_table[rs6000_tune_index].name);
5778 start = "";
5781 if (PPC405_ERRATUM77)
5783 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5784 start = "";
5787 #ifdef USING_ELFOS_H
5788 switch (rs6000_sdata)
5790 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5791 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5792 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5793 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5796 if (rs6000_sdata && g_switch_value)
5798 fprintf (file, "%s -G %d", start,
5799 g_switch_value);
5800 start = "";
5802 #endif
5804 if (*start == '\0')
5805 putc ('\n', file);
5808 #ifdef USING_ELFOS_H
5809 if (rs6000_default_cpu == 0 || rs6000_default_cpu[0] == '\0'
5810 || !global_options_set.x_rs6000_cpu_index)
5812 fputs ("\t.machine ", asm_out_file);
5813 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5814 fputs ("power9\n", asm_out_file);
5815 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5816 fputs ("power8\n", asm_out_file);
5817 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5818 fputs ("power7\n", asm_out_file);
5819 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5820 fputs ("power6\n", asm_out_file);
5821 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5822 fputs ("power5\n", asm_out_file);
5823 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5824 fputs ("power4\n", asm_out_file);
5825 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5826 fputs ("ppc64\n", asm_out_file);
5827 else
5828 fputs ("ppc\n", asm_out_file);
5830 #endif
5832 if (DEFAULT_ABI == ABI_ELFv2)
5833 fprintf (file, "\t.abiversion 2\n");
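/* Sample preamble emitted by the function above (configuration is
   hypothetical): with -fverbose-asm, a --with-cpu=power8 default, no
   explicit -mcpu, and an ELFv2 target whose ISA flags include
   OPTION_MASK_DIRECT_MOVE, the assembly output begins roughly

       # rs6000/powerpc options: --with-cpu=power8
       .machine power8
       .abiversion 2
*/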
5837 /* Return nonzero if this function is known to have a null epilogue. */
5839 int
5840 direct_return (void)
5842 if (reload_completed)
5844 rs6000_stack_t *info = rs6000_stack_info ();
5846 if (info->first_gp_reg_save == 32
5847 && info->first_fp_reg_save == 64
5848 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5849 && ! info->lr_save_p
5850 && ! info->cr_save_p
5851 && info->vrsave_size == 0
5852 && ! info->push_p)
5853 return 1;
5856 return 0;
5859 /* Return the number of instructions it takes to form a constant in an
5860 integer register. */
5862 int
5863 num_insns_constant_wide (HOST_WIDE_INT value)
5865 /* signed constant loadable with addi */
5866 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5867 return 1;
5869 /* constant loadable with addis */
5870 else if ((value & 0xffff) == 0
5871 && (value >> 31 == -1 || value >> 31 == 0))
5872 return 1;
5874 else if (TARGET_POWERPC64)
5876 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5877 HOST_WIDE_INT high = value >> 31;
5879 if (high == 0 || high == -1)
5880 return 2;
5882 high >>= 1;
5884 if (low == 0)
5885 return num_insns_constant_wide (high) + 1;
5886 else if (high == 0)
5887 return num_insns_constant_wide (low) + 1;
5888 else
5889 return (num_insns_constant_wide (high)
5890 + num_insns_constant_wide (low) + 1);
5893 else
5894 return 2;
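/* Worked examples for num_insns_constant_wide (values chosen for
   illustration): 0x7fff passes the addi test and costs 1;
   0x12340000 passes the addis test and costs 1; 0x12345678 needs
   addis+ori and costs 2; on a 64-bit target, 0x1234567800000000 has
   low == 0, so it costs the high part (2) plus one shift, 3 in
   total.  */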
5897 int
5898 num_insns_constant (rtx op, machine_mode mode)
5900 HOST_WIDE_INT low, high;
5902 switch (GET_CODE (op))
5904 case CONST_INT:
5905 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5906 && rs6000_is_valid_and_mask (op, mode))
5907 return 2;
5908 else
5909 return num_insns_constant_wide (INTVAL (op));
5911 case CONST_WIDE_INT:
5913 int i;
5914 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5915 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5916 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5917 return ins;
5920 case CONST_DOUBLE:
5921 if (mode == SFmode || mode == SDmode)
5923 long l;
5925 if (DECIMAL_FLOAT_MODE_P (mode))
5926 REAL_VALUE_TO_TARGET_DECIMAL32
5927 (*CONST_DOUBLE_REAL_VALUE (op), l);
5928 else
5929 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5930 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5933 long l[2];
5934 if (DECIMAL_FLOAT_MODE_P (mode))
5935 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5936 else
5937 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5938 high = l[WORDS_BIG_ENDIAN == 0];
5939 low = l[WORDS_BIG_ENDIAN != 0];
5941 if (TARGET_32BIT)
5942 return (num_insns_constant_wide (low)
5943 + num_insns_constant_wide (high));
5944 else
5946 if ((high == 0 && low >= 0)
5947 || (high == -1 && low < 0))
5948 return num_insns_constant_wide (low);
5950 else if (rs6000_is_valid_and_mask (op, mode))
5951 return 2;
5953 else if (low == 0)
5954 return num_insns_constant_wide (high) + 1;
5956 else
5957 return (num_insns_constant_wide (high)
5958 + num_insns_constant_wide (low) + 1);
5961 default:
5962 gcc_unreachable ();
5966 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5967 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5968 corresponding element of the vector, but for V4SFmode and V2SFmode,
5969 the corresponding "float" is interpreted as an SImode integer. */
5971 HOST_WIDE_INT
5972 const_vector_elt_as_int (rtx op, unsigned int elt)
5974 rtx tmp;
5976 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5977 gcc_assert (GET_MODE (op) != V2DImode
5978 && GET_MODE (op) != V2DFmode);
5980 tmp = CONST_VECTOR_ELT (op, elt);
5981 if (GET_MODE (op) == V4SFmode
5982 || GET_MODE (op) == V2SFmode)
5983 tmp = gen_lowpart (SImode, tmp);
5984 return INTVAL (tmp);
5987 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5988 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5989 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5990 all items are set to the same value and contain COPIES replicas of the
5991 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5992 operand and the others are set to the value of the operand's msb. */
5994 static bool
5995 vspltis_constant (rtx op, unsigned step, unsigned copies)
5997 machine_mode mode = GET_MODE (op);
5998 machine_mode inner = GET_MODE_INNER (mode);
6000 unsigned i;
6001 unsigned nunits;
6002 unsigned bitsize;
6003 unsigned mask;
6005 HOST_WIDE_INT val;
6006 HOST_WIDE_INT splat_val;
6007 HOST_WIDE_INT msb_val;
6009 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6010 return false;
6012 nunits = GET_MODE_NUNITS (mode);
6013 bitsize = GET_MODE_BITSIZE (inner);
6014 mask = GET_MODE_MASK (inner);
6016 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6017 splat_val = val;
6018 msb_val = val >= 0 ? 0 : -1;
6020 /* Construct the value to be splatted, if possible. If not, return 0. */
6021 for (i = 2; i <= copies; i *= 2)
6023 HOST_WIDE_INT small_val;
6024 bitsize /= 2;
6025 small_val = splat_val >> bitsize;
6026 mask >>= bitsize;
6027 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
6028 return false;
6029 splat_val = small_val;
6032 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6033 if (EASY_VECTOR_15 (splat_val))
6036 /* Also check if we can splat, and then add the result to itself.  Do so if
6037 the value is positive, or if the splat instruction is using OP's mode;
6038 for splat_val < 0, the splat and the add should use the same mode.  */
6039 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6040 && (splat_val >= 0 || (step == 1 && copies == 1)))
6043 /* Also check if we are loading up the most significant bit, which can be
6044 done by loading up -1 and shifting the value left by -1.  */
6045 else if (EASY_VECTOR_MSB (splat_val, inner))
6048 else
6049 return false;
6051 /* Check if VAL is present in every STEP-th element, and the
6052 other elements are filled with its most significant bit. */
6053 for (i = 1; i < nunits; ++i)
6055 HOST_WIDE_INT desired_val;
6056 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6057 if ((i & (step - 1)) == 0)
6058 desired_val = val;
6059 else
6060 desired_val = msb_val;
6062 if (desired_val != const_vector_elt_as_int (op, elt))
6063 return false;
6066 return true;
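/* Example of the check above on a hand-picked constant: for the V8HI
   vector { 3, 3, 3, 3, 3, 3, 3, 3 }, the STEP == 2 attempt (the
   vspltisw mapping) fails because the intervening elements are not
   sign bits, but the STEP == 1, COPIES == 1 attempt succeeds, so the
   constant can be generated with a single vspltish 3.  */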
6069 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6070 instruction, filling in the bottom elements with 0 or -1.
6072 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6073 for the number of zeroes to shift in, or negative for the number of 0xff
6074 bytes to shift in.
6076 OP is a CONST_VECTOR. */
6078 int
6079 vspltis_shifted (rtx op)
6081 machine_mode mode = GET_MODE (op);
6082 machine_mode inner = GET_MODE_INNER (mode);
6084 unsigned i, j;
6085 unsigned nunits;
6086 unsigned mask;
6088 HOST_WIDE_INT val;
6090 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6091 return false;
6093 /* We need to create pseudo registers to do the shift, so don't recognize
6094 shift vector constants after reload. */
6095 if (!can_create_pseudo_p ())
6096 return false;
6098 nunits = GET_MODE_NUNITS (mode);
6099 mask = GET_MODE_MASK (inner);
6101 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6103 /* Check if the value can really be the operand of a vspltis[bhw]. */
6104 if (EASY_VECTOR_15 (val))
6107 /* Also check if we are loading up the most significant bit which can be done
6108 by loading up -1 and shifting the value left by -1. */
6109 else if (EASY_VECTOR_MSB (val, inner))
6112 else
6113 return 0;
6115 /* Check if VAL is present in every STEP-th element until we find elements
6116 that are 0 or all 1 bits. */
6117 for (i = 1; i < nunits; ++i)
6119 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6120 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6122 /* If the value isn't the splat value, check for the remaining elements
6123 being 0/-1. */
6124 if (val != elt_val)
6126 if (elt_val == 0)
6128 for (j = i+1; j < nunits; ++j)
6130 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6131 if (const_vector_elt_as_int (op, elt2) != 0)
6132 return 0;
6135 return (nunits - i) * GET_MODE_SIZE (inner);
6138 else if ((elt_val & mask) == mask)
6140 for (j = i+1; j < nunits; ++j)
6142 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6143 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6144 return 0;
6147 return -((nunits - i) * GET_MODE_SIZE (inner));
6150 else
6151 return 0;
6155 /* If all elements are equal, we don't need to do VSLDOI.  */
6156 return 0;
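/* Shifted-splat examples (big-endian element order, before reload):
   the V4SI constant { 5, 5, 5, 0 } splats 5 with one trailing zero
   word, so the function returns 4 (four zero bytes to shift in);
   { 7, -1, -1, -1 } returns -12, i.e. twelve 0xff bytes shift in
   instead.  */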
6160 /* Return true if OP is of the given MODE and can be synthesized
6161 with a vspltisb, vspltish or vspltisw. */
6163 bool
6164 easy_altivec_constant (rtx op, machine_mode mode)
6166 unsigned step, copies;
6168 if (mode == VOIDmode)
6169 mode = GET_MODE (op);
6170 else if (mode != GET_MODE (op))
6171 return false;
6173 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6174 constants. */
6175 if (mode == V2DFmode)
6176 return zero_constant (op, mode);
6178 else if (mode == V2DImode)
6180 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6181 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6182 return false;
6184 if (zero_constant (op, mode))
6185 return true;
6187 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6188 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6189 return true;
6191 return false;
6194 /* V1TImode is a special container for TImode. Ignore for now. */
6195 else if (mode == V1TImode)
6196 return false;
6198 /* Start with a vspltisw. */
6199 step = GET_MODE_NUNITS (mode) / 4;
6200 copies = 1;
6202 if (vspltis_constant (op, step, copies))
6203 return true;
6205 /* Then try with a vspltish. */
6206 if (step == 1)
6207 copies <<= 1;
6208 else
6209 step >>= 1;
6211 if (vspltis_constant (op, step, copies))
6212 return true;
6214 /* And finally a vspltisb. */
6215 if (step == 1)
6216 copies <<= 1;
6217 else
6218 step >>= 1;
6220 if (vspltis_constant (op, step, copies))
6221 return true;
6223 if (vspltis_shifted (op) != 0)
6224 return true;
6226 return false;
6229 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6230 result is OP. Abort if it is not possible. */
6232 rtx
6233 gen_easy_altivec_constant (rtx op)
6235 machine_mode mode = GET_MODE (op);
6236 int nunits = GET_MODE_NUNITS (mode);
6237 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6238 unsigned step = nunits / 4;
6239 unsigned copies = 1;
6241 /* Start with a vspltisw. */
6242 if (vspltis_constant (op, step, copies))
6243 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6245 /* Then try with a vspltish. */
6246 if (step == 1)
6247 copies <<= 1;
6248 else
6249 step >>= 1;
6251 if (vspltis_constant (op, step, copies))
6252 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6254 /* And finally a vspltisb. */
6255 if (step == 1)
6256 copies <<= 1;
6257 else
6258 step >>= 1;
6260 if (vspltis_constant (op, step, copies))
6261 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6263 gcc_unreachable ();
6266 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6267 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6269 Return the number of instructions needed (1 or 2) into the address pointed
6270 via NUM_INSNS_PTR.
6272 Return the constant that is being split via CONSTANT_PTR. */
6274 bool
6275 xxspltib_constant_p (rtx op,
6276 machine_mode mode,
6277 int *num_insns_ptr,
6278 int *constant_ptr)
6280 size_t nunits = GET_MODE_NUNITS (mode);
6281 size_t i;
6282 HOST_WIDE_INT value;
6283 rtx element;
6285 /* Set the returned values to out of bound values. */
6286 *num_insns_ptr = -1;
6287 *constant_ptr = 256;
6289 if (!TARGET_P9_VECTOR)
6290 return false;
6292 if (mode == VOIDmode)
6293 mode = GET_MODE (op);
6295 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6296 return false;
6298 /* Handle (vec_duplicate <constant>). */
6299 if (GET_CODE (op) == VEC_DUPLICATE)
6301 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6302 && mode != V2DImode)
6303 return false;
6305 element = XEXP (op, 0);
6306 if (!CONST_INT_P (element))
6307 return false;
6309 value = INTVAL (element);
6310 if (!IN_RANGE (value, -128, 127))
6311 return false;
6314 /* Handle (const_vector [...]). */
6315 else if (GET_CODE (op) == CONST_VECTOR)
6317 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6318 && mode != V2DImode)
6319 return false;
6321 element = CONST_VECTOR_ELT (op, 0);
6322 if (!CONST_INT_P (element))
6323 return false;
6325 value = INTVAL (element);
6326 if (!IN_RANGE (value, -128, 127))
6327 return false;
6329 for (i = 1; i < nunits; i++)
6331 element = CONST_VECTOR_ELT (op, i);
6332 if (!CONST_INT_P (element))
6333 return false;
6335 if (value != INTVAL (element))
6336 return false;
6340 /* Handle integer constants being loaded into the upper part of the VSX
6341 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6342 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6343 else if (CONST_INT_P (op))
6345 if (!SCALAR_INT_MODE_P (mode))
6346 return false;
6348 value = INTVAL (op);
6349 if (!IN_RANGE (value, -128, 127))
6350 return false;
6352 if (!IN_RANGE (value, -1, 0))
6354 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6355 return false;
6357 if (EASY_VECTOR_15 (value))
6358 return false;
6362 else
6363 return false;
6365 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6366 sign extend. Special case 0/-1 to allow getting any VSX register instead
6367 of an Altivec register. */
6368 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6369 && EASY_VECTOR_15 (value))
6370 return false;
6372 /* Return # of instructions and the constant byte for XXSPLTIB. */
6373 if (mode == V16QImode)
6374 *num_insns_ptr = 1;
6376 else if (IN_RANGE (value, -1, 0))
6377 *num_insns_ptr = 1;
6379 else
6380 *num_insns_ptr = 2;
6382 *constant_ptr = (int) value;
6383 return true;
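/* Usage sketch for xxspltib_constant_p (hypothetical operands): a
   V16QImode vector with every element 37 reports 1 instruction and
   constant 37 (a lone xxspltib); a V4SImode splat of 37 reports 2,
   since the xxspltib result must be sign-extended (e.g. vextsb2w);
   but a V4SImode splat of 7 returns false, because EASY_VECTOR_15
   prefers the Altivec vspltisw form.  */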
6386 const char *
6387 output_vec_const_move (rtx *operands)
6389 int cst, cst2, shift;
6390 machine_mode mode;
6391 rtx dest, vec;
6393 dest = operands[0];
6394 vec = operands[1];
6395 mode = GET_MODE (dest);
6397 if (TARGET_VSX)
6399 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6400 int xxspltib_value = 256;
6401 int num_insns = -1;
6403 if (zero_constant (vec, mode))
6405 if (TARGET_P9_VECTOR)
6406 return "xxspltib %x0,0";
6408 else if (dest_vmx_p)
6409 return "vspltisw %0,0";
6411 else
6412 return "xxlxor %x0,%x0,%x0";
6415 if (all_ones_constant (vec, mode))
6417 if (TARGET_P9_VECTOR)
6418 return "xxspltib %x0,255";
6420 else if (dest_vmx_p)
6421 return "vspltisw %0,-1";
6423 else if (TARGET_P8_VECTOR)
6424 return "xxlorc %x0,%x0,%x0";
6426 else
6427 gcc_unreachable ();
6430 if (TARGET_P9_VECTOR
6431 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6433 if (num_insns == 1)
6435 operands[2] = GEN_INT (xxspltib_value & 0xff);
6436 return "xxspltib %x0,%2";
6439 return "#";
6443 if (TARGET_ALTIVEC)
6445 rtx splat_vec;
6447 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6448 if (zero_constant (vec, mode))
6449 return "vspltisw %0,0";
6451 if (all_ones_constant (vec, mode))
6452 return "vspltisw %0,-1";
6454 /* Do we need to construct a value using VSLDOI? */
6455 shift = vspltis_shifted (vec);
6456 if (shift != 0)
6457 return "#";
6459 splat_vec = gen_easy_altivec_constant (vec);
6460 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6461 operands[1] = XEXP (splat_vec, 0);
6462 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6463 return "#";
6465 switch (GET_MODE (splat_vec))
6467 case V4SImode:
6468 return "vspltisw %0,%1";
6470 case V8HImode:
6471 return "vspltish %0,%1";
6473 case V16QImode:
6474 return "vspltisb %0,%1";
6476 default:
6477 gcc_unreachable ();
6481 gcc_assert (TARGET_SPE);
6483 /* Vector constant 0 is handled as a splitter of V2SI, and in the
6484 pattern of V1DI, V4HI, and V2SF.
6486 FIXME: We should probably return # and add post reload
6487 splitters for these, but this way is so easy ;-). */
6488 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
6489 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
6490 operands[1] = CONST_VECTOR_ELT (vec, 0);
6491 operands[2] = CONST_VECTOR_ELT (vec, 1);
6492 if (cst == cst2)
6493 return "li %0,%1\n\tevmergelo %0,%0,%0";
6494 else if (WORDS_BIG_ENDIAN)
6495 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
6496 else
6497 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
6500 /* Initialize TARGET of vector PAIRED to VALS. */
6502 void
6503 paired_expand_vector_init (rtx target, rtx vals)
6505 machine_mode mode = GET_MODE (target);
6506 int n_elts = GET_MODE_NUNITS (mode);
6507 int n_var = 0;
6508 rtx x, new_rtx, tmp, constant_op, op1, op2;
6509 int i;
6511 for (i = 0; i < n_elts; ++i)
6513 x = XVECEXP (vals, 0, i);
6514 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6515 ++n_var;
6517 if (n_var == 0)
6519 /* Load from constant pool. */
6520 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6521 return;
6524 if (n_var == 2)
6526 /* The vector is initialized only with non-constants. */
6527 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6528 XVECEXP (vals, 0, 1));
6530 emit_move_insn (target, new_rtx);
6531 return;
6534 /* One field is non-constant and the other one is a constant. Load the
6535 constant from the constant pool and use ps_merge instruction to
6536 construct the whole vector. */
6537 op1 = XVECEXP (vals, 0, 0);
6538 op2 = XVECEXP (vals, 0, 1);
6540 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6542 tmp = gen_reg_rtx (GET_MODE (constant_op));
6543 emit_move_insn (tmp, constant_op);
6545 if (CONSTANT_P (op1))
6546 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6547 else
6548 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6550 emit_move_insn (target, new_rtx);
6553 void
6554 paired_expand_vector_move (rtx operands[])
6556 rtx op0 = operands[0], op1 = operands[1];
6558 emit_move_insn (op0, op1);
6561 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6562 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6563 operands for the relation operation COND. This is a recursive
6564 function. */
6566 static void
6567 paired_emit_vector_compare (enum rtx_code rcode,
6568 rtx dest, rtx op0, rtx op1,
6569 rtx cc_op0, rtx cc_op1)
6571 rtx tmp = gen_reg_rtx (V2SFmode);
6572 rtx tmp1, max, min;
6574 gcc_assert (TARGET_PAIRED_FLOAT);
6575 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6577 switch (rcode)
6579 case LT:
6580 case LTU:
6581 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6582 return;
6583 case GE:
6584 case GEU:
6585 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6586 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6587 return;
6588 case LE:
6589 case LEU:
6590 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6591 return;
6592 case GT:
6593 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6594 return;
6595 case EQ:
6596 tmp1 = gen_reg_rtx (V2SFmode);
6597 max = gen_reg_rtx (V2SFmode);
6598 min = gen_reg_rtx (V2SFmode);
6599 gen_reg_rtx (V2SFmode);
6601 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6602 emit_insn (gen_selv2sf4
6603 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6604 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6605 emit_insn (gen_selv2sf4
6606 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6607 emit_insn (gen_subv2sf3 (tmp1, min, max));
6608 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6609 return;
6610 case NE:
6611 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6612 return;
6613 case UNLE:
6614 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6615 return;
6616 case UNLT:
6617 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6618 return;
6619 case UNGE:
6620 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6621 return;
6622 case UNGT:
6623 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6624 return;
6625 default:
6626 gcc_unreachable ();
6629 return;
6632 /* Emit vector conditional expression.
6633 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6634 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6636 int
6637 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6638 rtx cond, rtx cc_op0, rtx cc_op1)
6640 enum rtx_code rcode = GET_CODE (cond);
6642 if (!TARGET_PAIRED_FLOAT)
6643 return 0;
6645 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6647 return 1;
6650 /* Initialize vector TARGET to VALS. */
6652 void
6653 rs6000_expand_vector_init (rtx target, rtx vals)
6655 machine_mode mode = GET_MODE (target);
6656 machine_mode inner_mode = GET_MODE_INNER (mode);
6657 int n_elts = GET_MODE_NUNITS (mode);
6658 int n_var = 0, one_var = -1;
6659 bool all_same = true, all_const_zero = true;
6660 rtx x, mem;
6661 int i;
6663 for (i = 0; i < n_elts; ++i)
6665 x = XVECEXP (vals, 0, i);
6666 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6667 ++n_var, one_var = i;
6668 else if (x != CONST0_RTX (inner_mode))
6669 all_const_zero = false;
6671 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6672 all_same = false;
6675 if (n_var == 0)
6677 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6678 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6679 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6681 /* Zero register. */
6682 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (mode, target, target)));
6683 return;
6685 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6687 /* Splat immediate. */
6688 emit_insn (gen_rtx_SET (target, const_vec));
6689 return;
6691 else
6693 /* Load from constant pool. */
6694 emit_move_insn (target, const_vec);
6695 return;
6699 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6700 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6702 rtx op0 = XVECEXP (vals, 0, 0);
6703 rtx op1 = XVECEXP (vals, 0, 1);
6704 if (all_same)
6706 if (!MEM_P (op0) && !REG_P (op0))
6707 op0 = force_reg (inner_mode, op0);
6708 if (mode == V2DFmode)
6709 emit_insn (gen_vsx_splat_v2df (target, op0));
6710 else
6711 emit_insn (gen_vsx_splat_v2di (target, op0));
6713 else
6715 op0 = force_reg (inner_mode, op0);
6716 op1 = force_reg (inner_mode, op1);
6717 if (mode == V2DFmode)
6718 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
6719 else
6720 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
6722 return;
6725 /* Word values on ISA 3.0 can use mtvsrws, lxvwsx, or vspltisw. V4SF is
6726 complicated since scalars are stored as doubles in the registers. */
6727 if (TARGET_P9_VECTOR && mode == V4SImode && all_same
6728 && VECTOR_MEM_VSX_P (mode))
6730 emit_insn (gen_vsx_splat_v4si (target, XVECEXP (vals, 0, 0)));
6731 return;
6734 /* With single-precision floating point on VSX, we know that internally
6735 single precision is actually represented as a double.  Either make two
6736 V2DF vectors and convert those vectors to single precision, or do one
6737 conversion and splat the result to the other elements.  */
6738 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
6740 if (all_same)
6742 rtx op0 = XVECEXP (vals, 0, 0);
6744 if (TARGET_P9_VECTOR)
6745 emit_insn (gen_vsx_splat_v4sf (target, op0));
6747 else
6749 rtx freg = gen_reg_rtx (V4SFmode);
6750 rtx sreg = force_reg (SFmode, op0);
6751 rtx cvt = (TARGET_XSCVDPSPN
6752 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6753 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6755 emit_insn (cvt);
6756 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6757 const0_rtx));
6760 else
6762 rtx dbl_even = gen_reg_rtx (V2DFmode);
6763 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6764 rtx flt_even = gen_reg_rtx (V4SFmode);
6765 rtx flt_odd = gen_reg_rtx (V4SFmode);
6766 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6767 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6768 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6769 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6771 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6772 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6773 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6774 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6775 rs6000_expand_extract_even (target, flt_even, flt_odd);
6777 return;
6780 /* Store value to stack temp. Load vector element. Splat. However, splat
6781 of 64-bit items is not supported on Altivec. */
6782 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6784 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6785 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6786 XVECEXP (vals, 0, 0));
6787 x = gen_rtx_UNSPEC (VOIDmode,
6788 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6789 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6790 gen_rtvec (2,
6791 gen_rtx_SET (target, mem),
6792 x)));
6793 x = gen_rtx_VEC_SELECT (inner_mode, target,
6794 gen_rtx_PARALLEL (VOIDmode,
6795 gen_rtvec (1, const0_rtx)));
6796 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6797 return;
6800 /* One field is non-constant. Load constant then overwrite
6801 varying field. */
6802 if (n_var == 1)
6804 rtx copy = copy_rtx (vals);
6806 /* Load constant part of vector, substitute neighboring value for
6807 varying element. */
6808 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6809 rs6000_expand_vector_init (target, copy);
6811 /* Insert variable. */
6812 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6813 return;
6816 /* Construct the vector in memory one field at a time
6817 and load the whole vector. */
6818 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6819 for (i = 0; i < n_elts; i++)
6820 emit_move_insn (adjust_address_nv (mem, inner_mode,
6821 i * GET_MODE_SIZE (inner_mode)),
6822 XVECEXP (vals, 0, i));
6823 emit_move_insn (target, mem);
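/* Strategy summary for rs6000_expand_vector_init as worked cases
   (V4SI on VSX, made-up values): { 0, 0, 0, 0 } becomes a single XOR
   of the target with itself; { 5, 5, 5, 5 } is an easy constant and
   is splatted directly; four distinct variable elements fall through
   to the final loop, which stores each word into a stack temporary
   and reloads the whole vector.  */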
6826 /* Set field ELT of TARGET to VAL. */
6828 void
6829 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6831 machine_mode mode = GET_MODE (target);
6832 machine_mode inner_mode = GET_MODE_INNER (mode);
6833 rtx reg = gen_reg_rtx (mode);
6834 rtx mask, mem, x;
6835 int width = GET_MODE_SIZE (inner_mode);
6836 int i;
6838 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6840 rtx (*set_func) (rtx, rtx, rtx, rtx)
6841 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
6842 emit_insn (set_func (target, target, val, GEN_INT (elt)));
6843 return;
6846 /* Simplify setting single element vectors like V1TImode. */
6847 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6849 emit_move_insn (target, gen_lowpart (mode, val));
6850 return;
6853 /* Load single variable value. */
6854 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6855 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6856 x = gen_rtx_UNSPEC (VOIDmode,
6857 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6858 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6859 gen_rtvec (2,
6860 gen_rtx_SET (reg, mem),
6861 x)));
6863 /* Linear sequence. */
6864 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6865 for (i = 0; i < 16; ++i)
6866 XVECEXP (mask, 0, i) = GEN_INT (i);
6868 /* Set permute mask to insert element into target. */
6869 for (i = 0; i < width; ++i)
6870 XVECEXP (mask, 0, elt*width + i)
6871 = GEN_INT (i + 0x10);
6872 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6874 if (BYTES_BIG_ENDIAN)
6875 x = gen_rtx_UNSPEC (mode,
6876 gen_rtvec (3, target, reg,
6877 force_reg (V16QImode, x)),
6878 UNSPEC_VPERM);
6879 else
6881 if (TARGET_P9_VECTOR)
6882 x = gen_rtx_UNSPEC (mode,
6883 gen_rtvec (3, target, reg,
6884 force_reg (V16QImode, x)),
6885 UNSPEC_VPERMR);
6886 else
6888 /* Invert selector. We prefer to generate VNAND on P8 so
6889 that future fusion opportunities can kick in, but must
6890 generate VNOR elsewhere. */
6891 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6892 rtx iorx = (TARGET_P8_VECTOR
6893 ? gen_rtx_IOR (V16QImode, notx, notx)
6894 : gen_rtx_AND (V16QImode, notx, notx));
6895 rtx tmp = gen_reg_rtx (V16QImode);
6896 emit_insn (gen_rtx_SET (tmp, iorx));
6898 /* Permute with operands reversed and adjusted selector. */
6899 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6900 UNSPEC_VPERM);
6904 emit_insn (gen_rtx_SET (target, x));
6907 /* Extract field ELT from VEC into TARGET. */
6909 void
6910 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
6912 machine_mode mode = GET_MODE (vec);
6913 machine_mode inner_mode = GET_MODE_INNER (mode);
6914 rtx mem;
6916 if (VECTOR_MEM_VSX_P (mode))
6918 switch (mode)
6920 default:
6921 break;
6922 case V1TImode:
6923 gcc_assert (elt == 0 && inner_mode == TImode);
6924 emit_move_insn (target, gen_lowpart (TImode, vec));
6925 break;
6926 case V2DFmode:
6927 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
6928 return;
6929 case V2DImode:
6930 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
6931 return;
6932 case V4SFmode:
6933 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
6934 return;
6938 /* Allocate mode-sized buffer. */
6939 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6941 emit_move_insn (mem, vec);
6943 /* Add offset to field within buffer matching vector element. */
6944 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
6946 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6949 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
6951 bool
6952 invalid_e500_subreg (rtx op, machine_mode mode)
6954 if (TARGET_E500_DOUBLE)
6956 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
6957 subreg:TI and reg:TF. Decimal float modes are like integer
6958 modes (only low part of each register used) for this
6959 purpose. */
6960 if (GET_CODE (op) == SUBREG
6961 && (mode == SImode || mode == DImode || mode == TImode
6962 || mode == DDmode || mode == TDmode || mode == PTImode)
6963 && REG_P (SUBREG_REG (op))
6964 && (GET_MODE (SUBREG_REG (op)) == DFmode
6965 || GET_MODE (SUBREG_REG (op)) == TFmode
6966 || GET_MODE (SUBREG_REG (op)) == IFmode
6967 || GET_MODE (SUBREG_REG (op)) == KFmode))
6968 return true;
6970 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
6971 reg:TI. */
6972 if (GET_CODE (op) == SUBREG
6973 && (mode == DFmode || mode == TFmode || mode == IFmode
6974 || mode == KFmode)
6975 && REG_P (SUBREG_REG (op))
6976 && (GET_MODE (SUBREG_REG (op)) == DImode
6977 || GET_MODE (SUBREG_REG (op)) == TImode
6978 || GET_MODE (SUBREG_REG (op)) == PTImode
6979 || GET_MODE (SUBREG_REG (op)) == DDmode
6980 || GET_MODE (SUBREG_REG (op)) == TDmode))
6981 return true;
6984 if (TARGET_SPE
6985 && GET_CODE (op) == SUBREG
6986 && mode == SImode
6987 && REG_P (SUBREG_REG (op))
6988 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
6989 return true;
6991 return false;
6994 /* Return the alignment of TYPE.  The existing alignment is ALIGN.  HOW
6995 selects whether the returned alignment is ABI-mandated, optional, or
6996 both ABI-mandated and optional alignment.  */
6998 unsigned int
6999 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7001 if (how != align_opt)
7003 if (TREE_CODE (type) == VECTOR_TYPE)
7005 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
7006 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
7008 if (align < 64)
7009 align = 64;
7011 else if (align < 128)
7012 align = 128;
7014 else if (TARGET_E500_DOUBLE
7015 && TREE_CODE (type) == REAL_TYPE
7016 && TYPE_MODE (type) == DFmode)
7018 if (align < 64)
7019 align = 64;
7023 if (how != align_abi)
7025 if (TREE_CODE (type) == ARRAY_TYPE
7026 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7028 if (align < BITS_PER_WORD)
7029 align = BITS_PER_WORD;
7033 return align;
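/* Effect of the hook above on two common cases (assuming a 64-bit
   target): for "char buf[64]", the optional-alignment branch raises
   ALIGN to BITS_PER_WORD, so the array becomes doubleword aligned
   even though the ABI mandates only byte alignment; an Altivec
   vector type keeps its ABI-mandated 128-bit alignment.  */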
7036 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7038 bool
7039 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
7041 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7043 if (computed != 128)
7045 static bool warned;
7046 if (!warned && warn_psabi)
7048 warned = true;
7049 inform (input_location,
7050 "the layout of aggregates containing vectors with"
7051 " %d-byte alignment has changed in GCC 5",
7052 computed / BITS_PER_UNIT);
7055 /* In current GCC there is no special case. */
7056 return false;
7059 return false;
7062 /* AIX increases natural record alignment to doubleword if the first
7063 field is an FP double while the FP fields remain word aligned. */
7065 unsigned int
7066 rs6000_special_round_type_align (tree type, unsigned int computed,
7067 unsigned int specified)
7069 unsigned int align = MAX (computed, specified);
7070 tree field = TYPE_FIELDS (type);
7072 /* Skip all non-field decls.  */
7073 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7074 field = DECL_CHAIN (field);
7076 if (field != NULL && field != type)
7078 type = TREE_TYPE (field);
7079 while (TREE_CODE (type) == ARRAY_TYPE)
7080 type = TREE_TYPE (type);
7082 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7083 align = MAX (align, 64);
7086 return align;
7089 /* Darwin increases record alignment to the natural alignment of
7090 the first field. */
7092 unsigned int
7093 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7094 unsigned int specified)
7096 unsigned int align = MAX (computed, specified);
7098 if (TYPE_PACKED (type))
7099 return align;
7101 /* Find the first field, looking down into aggregates. */
7102 do {
7103 tree field = TYPE_FIELDS (type);
7104 /* Skip all non-field decls.  */
7105 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7106 field = DECL_CHAIN (field);
7107 if (! field)
7108 break;
7109 /* A packed field does not contribute any extra alignment. */
7110 if (DECL_PACKED (field))
7111 return align;
7112 type = TREE_TYPE (field);
7113 while (TREE_CODE (type) == ARRAY_TYPE)
7114 type = TREE_TYPE (type);
7115 } while (AGGREGATE_TYPE_P (type));
7117 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7118 align = MAX (align, TYPE_ALIGN (type));
7120 return align;
7123 /* Return 1 for an operand in small memory on V.4/eabi. */
7125 int
7126 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7127 machine_mode mode ATTRIBUTE_UNUSED)
7129 #if TARGET_ELF
7130 rtx sym_ref;
7132 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7133 return 0;
7135 if (DEFAULT_ABI != ABI_V4)
7136 return 0;
7138 /* Vector and float memory instructions have a limited offset on the
7139 SPE, so using a vector or float variable directly as an operand is
7140 not useful. */
7141 if (TARGET_SPE
7142 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
7143 return 0;
7145 if (GET_CODE (op) == SYMBOL_REF)
7146 sym_ref = op;
7148 else if (GET_CODE (op) != CONST
7149 || GET_CODE (XEXP (op, 0)) != PLUS
7150 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7151 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7152 return 0;
7154 else
7156 rtx sum = XEXP (op, 0);
7157 HOST_WIDE_INT summand;
7159 /* We have to be careful here, because it is the referenced address
7160 that must be 32k from _SDA_BASE_, not just the symbol. */
7161 summand = INTVAL (XEXP (sum, 1));
7162 if (summand < 0 || summand > g_switch_value)
7163 return 0;
7165 sym_ref = XEXP (sum, 0);
7168 return SYMBOL_REF_SMALL_P (sym_ref);
7169 #else
7170 return 0;
7171 #endif
7174 /* Return true if either operand is a general purpose register. */
7176 bool
7177 gpr_or_gpr_p (rtx op0, rtx op1)
7179 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7180 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7183 /* Return true if this is a direct move operation between GPR registers and
7184 floating point/VSX registers.  */
7186 bool
7187 direct_move_p (rtx op0, rtx op1)
7189 int regno0, regno1;
7191 if (!REG_P (op0) || !REG_P (op1))
7192 return false;
7194 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7195 return false;
7197 regno0 = REGNO (op0);
7198 regno1 = REGNO (op1);
7199 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7200 return false;
7202 if (INT_REGNO_P (regno0))
7203 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7205 else if (INT_REGNO_P (regno1))
7207 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7208 return true;
7210 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7211 return true;
7214 return false;
7217 /* Return true if the OFFSET is valid for the quad address instructions that
7218 use d-form (register + offset) addressing. */
7220 static inline bool
7221 quad_address_offset_p (HOST_WIDE_INT offset)
7223 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
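/* For example (illustrative): offset 48 is accepted (it fits in a
   signed 16-bit field and its low 4 bits are zero), offset 56 is
   rejected because it is not a multiple of 16, and offset 32768 is
   rejected because it is outside the signed 16-bit range. */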
7226 /* Return true if ADDR is an acceptable address for a quad memory
7227 operation of mode MODE (either LQ/STQ for general purpose registers,
7228 or LXV/STXV for vector registers under ISA 3.0). STRICT says whether
7229 base registers must be validated strictly (as after reload) or whether
7230 pseudo registers are acceptable as bases too. */
7232 bool
7233 quad_address_p (rtx addr, machine_mode mode, bool strict)
7235 rtx op0, op1;
7237 if (GET_MODE_SIZE (mode) != 16)
7238 return false;
7240 if (legitimate_indirect_address_p (addr, strict))
7241 return true;
7243 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
7244 return false;
7246 if (GET_CODE (addr) != PLUS)
7247 return false;
7249 op0 = XEXP (addr, 0);
7250 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7251 return false;
7253 op1 = XEXP (addr, 1);
7254 if (!CONST_INT_P (op1))
7255 return false;
7257 return quad_address_offset_p (INTVAL (op1));
7260 /* Return true if this is a load or store quad operation. This function does
7261 not handle the atomic quad memory instructions. */
7263 bool
7264 quad_load_store_p (rtx op0, rtx op1)
7266 bool ret;
7268 if (!TARGET_QUAD_MEMORY)
7269 ret = false;
7271 else if (REG_P (op0) && MEM_P (op1))
7272 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7273 && quad_memory_operand (op1, GET_MODE (op1))
7274 && !reg_overlap_mentioned_p (op0, op1));
7276 else if (MEM_P (op0) && REG_P (op1))
7277 ret = (quad_memory_operand (op0, GET_MODE (op0))
7278 && quad_int_reg_operand (op1, GET_MODE (op1)));
7280 else
7281 ret = false;
7283 if (TARGET_DEBUG_ADDR)
7285 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7286 ret ? "true" : "false");
7287 debug_rtx (gen_rtx_SET (op0, op1));
7290 return ret;
7293 /* Given an address, return a constant offset term if one exists. */
7295 static rtx
7296 address_offset (rtx op)
7298 if (GET_CODE (op) == PRE_INC
7299 || GET_CODE (op) == PRE_DEC)
7300 op = XEXP (op, 0);
7301 else if (GET_CODE (op) == PRE_MODIFY
7302 || GET_CODE (op) == LO_SUM)
7303 op = XEXP (op, 1);
7305 if (GET_CODE (op) == CONST)
7306 op = XEXP (op, 0);
7308 if (GET_CODE (op) == PLUS)
7309 op = XEXP (op, 1);
7311 if (CONST_INT_P (op))
7312 return op;
7314 return NULL_RTX;
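/* Illustrative examples (not from the original source): this returns
   (const_int 16) for either of

     (plus (reg) (const_int 16))
     (lo_sum (reg) (const (plus (symbol_ref) (const_int 16))))

   and NULL_RTX for a bare (reg) or a (plus (reg) (reg)) address. */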
7317 /* Return true if the MEM operand is a memory operand suitable for use
7318 with a (full width, possibly multiple) gpr load/store. On
7319 powerpc64 this means the offset must be divisible by 4.
7320 Implements 'Y' constraint.
7322 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7323 a constraint function we know the operand has satisfied a suitable
7324 memory predicate. Also accept some odd rtl generated by reload
7325 (see rs6000_legitimize_reload_address for various forms). It is
7326 important that reload rtl be accepted by appropriate constraints
7327 but not by the operand predicate.
7329 Offsetting a lo_sum should not be allowed, except where we know by
7330 alignment that a 32k boundary is not crossed, but see the ???
7331 comment in rs6000_legitimize_reload_address. Note that by
7332 "offsetting" here we mean a further offset to access parts of the
7333 MEM. It's fine to have a lo_sum where the inner address is offset
7334 from a sym, since the same sym+offset will appear in the high part
7335 of the address calculation. */
7337 bool
7338 mem_operand_gpr (rtx op, machine_mode mode)
7340 unsigned HOST_WIDE_INT offset;
7341 int extra;
7342 rtx addr = XEXP (op, 0);
7344 op = address_offset (addr);
7345 if (op == NULL_RTX)
7346 return true;
7348 offset = INTVAL (op);
7349 if (TARGET_POWERPC64 && (offset & 3) != 0)
7350 return false;
7352 if (mode_supports_vsx_dform_quad (mode)
7353 && !quad_address_offset_p (offset))
7354 return false;
7356 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7357 if (extra < 0)
7358 extra = 0;
7360 if (GET_CODE (addr) == LO_SUM)
7361 /* For lo_sum addresses, we must allow any offset except one that
7362 causes a wrap, so test only the low 16 bits. */
7363 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7365 return offset + 0x8000 < 0x10000u - extra;
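/* Worked example (illustrative): for a lo_sum whose constant term is
   0x18008, only the low 16 bits matter, so OFFSET becomes
   ((0x8008 ^ 0x8000) - 0x8000) = -32760; the test above then checks
   -32760 + 0x8000 = 8 < 0x10000 - EXTRA, which holds, so the operand
   is accepted. */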
7368 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7370 static bool
7371 reg_offset_addressing_ok_p (machine_mode mode)
7373 switch (mode)
7375 case V16QImode:
7376 case V8HImode:
7377 case V4SFmode:
7378 case V4SImode:
7379 case V2DFmode:
7380 case V2DImode:
7381 case V1TImode:
7382 case TImode:
7383 case TFmode:
7384 case KFmode:
7385 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7386 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7387 a vector mode, if we want to use the VSX registers to move it around,
7388 we need to restrict ourselves to reg+reg addressing. Similarly for
7389 IEEE 128-bit floating point that is passed in a single vector
7390 register. */
7391 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7392 return mode_supports_vsx_dform_quad (mode);
7393 break;
7395 case V4HImode:
7396 case V2SImode:
7397 case V1DImode:
7398 case V2SFmode:
7399 /* Paired vector modes. Only reg+reg addressing is valid. */
7400 if (TARGET_PAIRED_FLOAT)
7401 return false;
7402 break;
7404 case SDmode:
7405 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7406 addressing for the LFIWZX and STFIWX instructions. */
7407 if (TARGET_NO_SDMODE_STACK)
7408 return false;
7409 break;
7411 default:
7412 break;
7415 return true;
7418 static bool
7419 virtual_stack_registers_memory_p (rtx op)
7421 int regnum;
7423 if (GET_CODE (op) == REG)
7424 regnum = REGNO (op);
7426 else if (GET_CODE (op) == PLUS
7427 && GET_CODE (XEXP (op, 0)) == REG
7428 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7429 regnum = REGNO (XEXP (op, 0));
7431 else
7432 return false;
7434 return (regnum >= FIRST_VIRTUAL_REGISTER
7435 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7438 /* Return true if a MODE sized memory access to OP plus OFFSET
7439 is known to not straddle a 32k boundary. This function is used
7440 to determine whether -mcmodel=medium code can use TOC pointer
7441 relative addressing for OP. This means the alignment of the TOC
7442 pointer must also be taken into account, and unfortunately that is
7443 only 8 bytes. */
7445 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7446 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7447 #endif
7449 static bool
7450 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7451 machine_mode mode)
7453 tree decl;
7454 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7456 if (GET_CODE (op) != SYMBOL_REF)
7457 return false;
7459 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7460 SYMBOL_REF. */
7461 if (mode_supports_vsx_dform_quad (mode))
7462 return false;
7464 dsize = GET_MODE_SIZE (mode);
7465 decl = SYMBOL_REF_DECL (op);
7466 if (!decl)
7468 if (dsize == 0)
7469 return false;
7471 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7472 replacing memory addresses with an anchor plus offset. We
7473 could find the decl by rummaging around in the block->objects
7474 VEC for the given offset but that seems like too much work. */
7475 dalign = BITS_PER_UNIT;
7476 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7477 && SYMBOL_REF_ANCHOR_P (op)
7478 && SYMBOL_REF_BLOCK (op) != NULL)
7480 struct object_block *block = SYMBOL_REF_BLOCK (op);
7482 dalign = block->alignment;
7483 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7485 else if (CONSTANT_POOL_ADDRESS_P (op))
7487 /* It would be nice to have get_pool_align ()... */
7488 machine_mode cmode = get_pool_mode (op);
7490 dalign = GET_MODE_ALIGNMENT (cmode);
7493 else if (DECL_P (decl))
7495 dalign = DECL_ALIGN (decl);
7497 if (dsize == 0)
7499 /* Allow BLKmode when the entire object is known to not
7500 cross a 32k boundary. */
7501 if (!DECL_SIZE_UNIT (decl))
7502 return false;
7504 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7505 return false;
7507 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7508 if (dsize > 32768)
7509 return false;
7511 dalign /= BITS_PER_UNIT;
7512 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7513 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7514 return dalign >= dsize;
7517 else
7518 gcc_unreachable ();
7520 /* Find how many bits of the alignment we know for this access. */
7521 dalign /= BITS_PER_UNIT;
7522 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7523 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7524 mask = dalign - 1;
7525 lsb = offset & -offset;
7526 mask &= lsb - 1;
7527 dalign = mask + 1;
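/* E.g. (illustrative): with a block alignment of 8 bytes and
   offset 20, lsb == 4, so mask becomes 3 and the alignment known
   for this access drops to 4 bytes. */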
7529 return dalign >= dsize;
7532 static bool
7533 constant_pool_expr_p (rtx op)
7535 rtx base, offset;
7537 split_const (op, &base, &offset);
7538 return (GET_CODE (base) == SYMBOL_REF
7539 && CONSTANT_POOL_ADDRESS_P (base)
7540 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7543 static const_rtx tocrel_base, tocrel_offset;
7545 /* Return true if OP is a toc pointer relative address (the output
7546 of create_TOC_reference). If STRICT, do not match high part or
7547 non-split -mcmodel=large/medium toc pointer relative addresses. */
7549 bool
7550 toc_relative_expr_p (const_rtx op, bool strict)
7552 if (!TARGET_TOC)
7553 return false;
7555 if (TARGET_CMODEL != CMODEL_SMALL)
7557 /* Only match the low part. */
7558 if (GET_CODE (op) == LO_SUM
7559 && REG_P (XEXP (op, 0))
7560 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
7561 op = XEXP (op, 1);
7562 else if (strict)
7563 return false;
7566 tocrel_base = op;
7567 tocrel_offset = const0_rtx;
7568 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7570 tocrel_base = XEXP (op, 0);
7571 tocrel_offset = XEXP (op, 1);
7574 return (GET_CODE (tocrel_base) == UNSPEC
7575 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
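/* Sketch of the matched form (illustrative, not from the original
   source): the base is roughly

     (unspec [(symbol_ref) (reg 2)] UNSPEC_TOCREL)

   possibly inside a PLUS with a constant term, and for medium/large
   code models possibly in the low part of a LO_SUM. */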
7578 /* Return true if X is a constant pool address, and also for cmodel=medium
7579 if X is a toc-relative address known to be offsettable within MODE. */
7581 bool
7582 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7583 bool strict)
7585 return (toc_relative_expr_p (x, strict)
7586 && (TARGET_CMODEL != CMODEL_MEDIUM
7587 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7588 || mode == QImode
7589 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7590 INTVAL (tocrel_offset), mode)));
7593 static bool
7594 legitimate_small_data_p (machine_mode mode, rtx x)
7596 return (DEFAULT_ABI == ABI_V4
7597 && !flag_pic && !TARGET_TOC
7598 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7599 && small_data_operand (x, mode));
7602 /* SPE offset addressing is limited to 5 bits' worth of double words. */
7603 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
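/* That is (illustrative): the offset may only have bits within 0xf8
   set, i.e. it must be a multiple of 8 between 0 and 248 inclusive;
   SPE_CONST_OFFSET_OK (248) holds, while SPE_CONST_OFFSET_OK (250)
   and SPE_CONST_OFFSET_OK (256) do not. */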
7605 bool
7606 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7607 bool strict, bool worst_case)
7609 unsigned HOST_WIDE_INT offset;
7610 unsigned int extra;
7612 if (GET_CODE (x) != PLUS)
7613 return false;
7614 if (!REG_P (XEXP (x, 0)))
7615 return false;
7616 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7617 return false;
7618 if (mode_supports_vsx_dform_quad (mode))
7619 return quad_address_p (x, mode, strict);
7620 if (!reg_offset_addressing_ok_p (mode))
7621 return virtual_stack_registers_memory_p (x);
7622 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7623 return true;
7624 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7625 return false;
7627 offset = INTVAL (XEXP (x, 1));
7628 extra = 0;
7629 switch (mode)
7631 case V4HImode:
7632 case V2SImode:
7633 case V1DImode:
7634 case V2SFmode:
7635 /* SPE vector modes. */
7636 return SPE_CONST_OFFSET_OK (offset);
7638 case DFmode:
7639 case DDmode:
7640 case DImode:
7641 /* On e500v2, we may have:
7643 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
7645 Which gets addressed with evldd instructions. */
7646 if (TARGET_E500_DOUBLE)
7647 return SPE_CONST_OFFSET_OK (offset);
7649 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7650 addressing. */
7651 if (VECTOR_MEM_VSX_P (mode))
7652 return false;
7654 if (!worst_case)
7655 break;
7656 if (!TARGET_POWERPC64)
7657 extra = 4;
7658 else if (offset & 3)
7659 return false;
7660 break;
7662 case TFmode:
7663 case IFmode:
7664 case KFmode:
7665 if (TARGET_E500_DOUBLE)
7666 return (SPE_CONST_OFFSET_OK (offset)
7667 && SPE_CONST_OFFSET_OK (offset + 8));
7668 /* fall through */
7670 case TDmode:
7671 case TImode:
7672 case PTImode:
7673 extra = 8;
7674 if (!worst_case)
7675 break;
7676 if (!TARGET_POWERPC64)
7677 extra = 12;
7678 else if (offset & 3)
7679 return false;
7680 break;
7682 default:
7683 break;
7686 offset += 0x8000;
7687 return offset < 0x10000 - extra;
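/* Worked example for the final test above (illustrative): for DImode
   on 32-bit with WORST_CASE, EXTRA is 4, so offsets from -32768 to
   32763 are accepted, guaranteeing the second word at offset+4 is
   still reachable with a 16-bit displacement. */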
7690 bool
7691 legitimate_indexed_address_p (rtx x, int strict)
7693 rtx op0, op1;
7695 if (GET_CODE (x) != PLUS)
7696 return false;
7698 op0 = XEXP (x, 0);
7699 op1 = XEXP (x, 1);
7701 /* Recognize the rtl generated by reload which we know will later be
7702 replaced with proper base and index regs. */
7703 if (!strict
7704 && reload_in_progress
7705 && (REG_P (op0) || GET_CODE (op0) == PLUS)
7706 && REG_P (op1))
7707 return true;
7709 return (REG_P (op0) && REG_P (op1)
7710 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
7711 && INT_REG_OK_FOR_INDEX_P (op1, strict))
7712 || (INT_REG_OK_FOR_BASE_P (op1, strict)
7713 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
7716 bool
7717 avoiding_indexed_address_p (machine_mode mode)
7719 /* Avoid indexed addressing for modes that have non-indexed
7720 load/store instruction forms. */
7721 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
7724 bool
7725 legitimate_indirect_address_p (rtx x, int strict)
7727 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
7730 bool
7731 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
7733 if (!TARGET_MACHO || !flag_pic
7734 || mode != SImode || GET_CODE (x) != MEM)
7735 return false;
7736 x = XEXP (x, 0);
7738 if (GET_CODE (x) != LO_SUM)
7739 return false;
7740 if (GET_CODE (XEXP (x, 0)) != REG)
7741 return false;
7742 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
7743 return false;
7744 x = XEXP (x, 1);
7746 return CONSTANT_P (x);
7749 static bool
7750 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
7752 if (GET_CODE (x) != LO_SUM)
7753 return false;
7754 if (GET_CODE (XEXP (x, 0)) != REG)
7755 return false;
7756 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7757 return false;
7758 /* Quad-word addresses are restricted; we can't use LO_SUM. */
7759 if (mode_supports_vsx_dform_quad (mode))
7760 return false;
7761 /* Restrict addressing for DI because of our SUBREG hackery. */
7762 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7763 return false;
7764 x = XEXP (x, 1);
7766 if (TARGET_ELF || TARGET_MACHO)
7768 bool large_toc_ok;
7770 if (DEFAULT_ABI == ABI_V4 && flag_pic)
7771 return false;
7772 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, since that hook
7773 usually calls push_reload, which is reload pass code.
7774 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
7775 valid although this function says the opposite. LRA can
7776 generate correct code for most address reloads through its own
7777 transformations, but it cannot manage some LO_SUM cases. So we
7778 need code here, analogous to that for LO_SUM in
7779 rs6000_legitimize_reload_address, saying those addresses are valid. */
7780 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
7781 && small_toc_ref (x, VOIDmode));
7782 if (TARGET_TOC && ! large_toc_ok)
7783 return false;
7784 if (GET_MODE_NUNITS (mode) != 1)
7785 return false;
7786 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
7787 && !(/* ??? Assume floating point reg based on mode? */
7788 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
7789 && (mode == DFmode || mode == DDmode)))
7790 return false;
7792 return CONSTANT_P (x) || large_toc_ok;
7795 return false;
7799 /* Try machine-dependent ways of modifying an illegitimate address
7800 to be legitimate. If we find one, return the new, valid address.
7801 This is used from only one place: `memory_address' in explow.c.
7803 OLDX is the address as it was before break_out_memory_refs was
7804 called. In some cases it is useful to look at this to decide what
7805 needs to be done.
7807 It is always safe for this function to do nothing. It exists to
7808 recognize opportunities to optimize the output.
7810 On RS/6000, first check for the sum of a register with a constant
7811 integer that is out of range. If so, generate code to add the
7812 constant with the low-order 16 bits masked to the register and force
7813 this result into another register (this can be done with `cau').
7814 Then generate an address of REG+(CONST&0xffff), allowing for the
7815 possibility of bit 16 being a one.
7817 Then check for the sum of a register and something not constant, try to
7818 load the other things into a register and return the sum. */
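/* A worked example of the splitting described above (illustrative,
   not from the original source): for reg + 100000, low_int becomes
   ((100000 & 0xffff) ^ 0x8000) - 0x8000 = -31072 and high_int becomes
   131072, so the 131072 is added to the register (an addis of 2) and
   the -31072 remains as the memory displacement. */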
7820 static rtx
7821 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
7822 machine_mode mode)
7824 unsigned int extra;
7826 if (!reg_offset_addressing_ok_p (mode)
7827 || mode_supports_vsx_dform_quad (mode))
7829 if (virtual_stack_registers_memory_p (x))
7830 return x;
7832 /* In theory we should not be seeing addresses of the form reg+0,
7833 but just in case it is generated, optimize it away. */
7834 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
7835 return force_reg (Pmode, XEXP (x, 0));
7837 /* For TImode with load/store quad, restrict addresses to just a single
7838 pointer, so it works with both GPRs and VSX registers. */
7839 /* Make sure both operands are registers. */
7840 else if (GET_CODE (x) == PLUS
7841 && (mode != TImode || !TARGET_QUAD_MEMORY))
7842 return gen_rtx_PLUS (Pmode,
7843 force_reg (Pmode, XEXP (x, 0)),
7844 force_reg (Pmode, XEXP (x, 1)));
7845 else
7846 return force_reg (Pmode, x);
7848 if (GET_CODE (x) == SYMBOL_REF)
7850 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
7851 if (model != 0)
7852 return rs6000_legitimize_tls_address (x, model);
7855 extra = 0;
7856 switch (mode)
7858 case TFmode:
7859 case TDmode:
7860 case TImode:
7861 case PTImode:
7862 case IFmode:
7863 case KFmode:
7864 /* As in legitimate_offset_address_p we do not assume
7865 worst-case. The mode here is just a hint as to the registers
7866 used. A TImode is usually in gprs, but may actually be in
7867 fprs. Leave worst-case scenario for reload to handle via
7868 insn constraints. PTImode is only GPRs. */
7869 extra = 8;
7870 break;
7871 default:
7872 break;
7875 if (GET_CODE (x) == PLUS
7876 && GET_CODE (XEXP (x, 0)) == REG
7877 && GET_CODE (XEXP (x, 1)) == CONST_INT
7878 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
7879 >= 0x10000 - extra)
7880 && !(SPE_VECTOR_MODE (mode)
7881 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
7883 HOST_WIDE_INT high_int, low_int;
7884 rtx sum;
7885 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
7886 if (low_int >= 0x8000 - extra)
7887 low_int = 0;
7888 high_int = INTVAL (XEXP (x, 1)) - low_int;
7889 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
7890 GEN_INT (high_int)), 0);
7891 return plus_constant (Pmode, sum, low_int);
7893 else if (GET_CODE (x) == PLUS
7894 && GET_CODE (XEXP (x, 0)) == REG
7895 && GET_CODE (XEXP (x, 1)) != CONST_INT
7896 && GET_MODE_NUNITS (mode) == 1
7897 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7898 || (/* ??? Assume floating point reg based on mode? */
7899 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7900 && (mode == DFmode || mode == DDmode)))
7901 && !avoiding_indexed_address_p (mode))
7903 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
7904 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
7906 else if (SPE_VECTOR_MODE (mode)
7907 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
7909 if (mode == DImode)
7910 return x;
7911 /* We accept [reg + reg] and [reg + OFFSET]. */
7913 if (GET_CODE (x) == PLUS)
7915 rtx op1 = XEXP (x, 0);
7916 rtx op2 = XEXP (x, 1);
7917 rtx y;
7919 op1 = force_reg (Pmode, op1);
7921 if (GET_CODE (op2) != REG
7922 && (GET_CODE (op2) != CONST_INT
7923 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
7924 || (GET_MODE_SIZE (mode) > 8
7925 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
7926 op2 = force_reg (Pmode, op2);
7928 /* We can't always do [reg + reg] for these, because [reg +
7929 reg + offset] is not a legitimate addressing mode. */
7930 y = gen_rtx_PLUS (Pmode, op1, op2);
7932 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
7933 return force_reg (Pmode, y);
7934 else
7935 return y;
7938 return force_reg (Pmode, x);
7940 else if ((TARGET_ELF
7941 #if TARGET_MACHO
7942 || !MACHO_DYNAMIC_NO_PIC_P
7943 #endif
7945 && TARGET_32BIT
7946 && TARGET_NO_TOC
7947 && ! flag_pic
7948 && GET_CODE (x) != CONST_INT
7949 && GET_CODE (x) != CONST_WIDE_INT
7950 && GET_CODE (x) != CONST_DOUBLE
7951 && CONSTANT_P (x)
7952 && GET_MODE_NUNITS (mode) == 1
7953 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
7954 || (/* ??? Assume floating point reg based on mode? */
7955 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7956 && (mode == DFmode || mode == DDmode))))
7958 rtx reg = gen_reg_rtx (Pmode);
7959 if (TARGET_ELF)
7960 emit_insn (gen_elf_high (reg, x));
7961 else
7962 emit_insn (gen_macho_high (reg, x));
7963 return gen_rtx_LO_SUM (Pmode, reg, x);
7965 else if (TARGET_TOC
7966 && GET_CODE (x) == SYMBOL_REF
7967 && constant_pool_expr_p (x)
7968 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
7969 return create_TOC_reference (x, NULL_RTX);
7970 else
7971 return x;
7974 /* Debug version of rs6000_legitimize_address. */
7975 static rtx
7976 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
7978 rtx ret;
7979 rtx_insn *insns;
7981 start_sequence ();
7982 ret = rs6000_legitimize_address (x, oldx, mode);
7983 insns = get_insns ();
7984 end_sequence ();
7986 if (ret != x)
7988 fprintf (stderr,
7989 "\nrs6000_legitimize_address: mode %s, old code %s, "
7990 "new code %s, modified\n",
7991 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
7992 GET_RTX_NAME (GET_CODE (ret)));
7994 fprintf (stderr, "Original address:\n");
7995 debug_rtx (x);
7997 fprintf (stderr, "oldx:\n");
7998 debug_rtx (oldx);
8000 fprintf (stderr, "New address:\n");
8001 debug_rtx (ret);
8003 if (insns)
8005 fprintf (stderr, "Insns added:\n");
8006 debug_rtx_list (insns, 20);
8009 else
8011 fprintf (stderr,
8012 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8013 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8015 debug_rtx (x);
8018 if (insns)
8019 emit_insn (insns);
8021 return ret;
8024 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8025 We need to emit DTP-relative relocations. */
8027 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8028 static void
8029 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8031 switch (size)
8033 case 4:
8034 fputs ("\t.long\t", file);
8035 break;
8036 case 8:
8037 fputs (DOUBLE_INT_ASM_OP, file);
8038 break;
8039 default:
8040 gcc_unreachable ();
8042 output_addr_const (file, x);
8043 if (TARGET_ELF)
8044 fputs ("@dtprel+0x8000", file);
8045 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8047 switch (SYMBOL_REF_TLS_MODEL (x))
8049 case 0:
8050 break;
8051 case TLS_MODEL_LOCAL_EXEC:
8052 fputs ("@le", file);
8053 break;
8054 case TLS_MODEL_INITIAL_EXEC:
8055 fputs ("@ie", file);
8056 break;
8057 case TLS_MODEL_GLOBAL_DYNAMIC:
8058 case TLS_MODEL_LOCAL_DYNAMIC:
8059 fputs ("@m", file);
8060 break;
8061 default:
8062 gcc_unreachable ();
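/* For example (illustrative), on a 32-bit ELF target this emits
   something like

     .long foo@dtprel+0x8000

   for a 4-byte DTP-relative reference to symbol foo. */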
8067 /* Return true if X is a symbol that refers to real (rather than emulated)
8068 TLS. */
8070 static bool
8071 rs6000_real_tls_symbol_ref_p (rtx x)
8073 return (GET_CODE (x) == SYMBOL_REF
8074 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8077 /* In the name of slightly smaller debug output, and to cater to
8078 general assembler lossage, recognize various UNSPEC sequences
8079 and turn them back into a direct symbol reference. */
8081 static rtx
8082 rs6000_delegitimize_address (rtx orig_x)
8084 rtx x, y, offset;
8086 orig_x = delegitimize_mem_from_attrs (orig_x);
8087 x = orig_x;
8088 if (MEM_P (x))
8089 x = XEXP (x, 0);
8091 y = x;
8092 if (TARGET_CMODEL != CMODEL_SMALL
8093 && GET_CODE (y) == LO_SUM)
8094 y = XEXP (y, 1);
8096 offset = NULL_RTX;
8097 if (GET_CODE (y) == PLUS
8098 && GET_MODE (y) == Pmode
8099 && CONST_INT_P (XEXP (y, 1)))
8101 offset = XEXP (y, 1);
8102 y = XEXP (y, 0);
8105 if (GET_CODE (y) == UNSPEC
8106 && XINT (y, 1) == UNSPEC_TOCREL)
8108 y = XVECEXP (y, 0, 0);
8110 #ifdef HAVE_AS_TLS
8111 /* Do not associate thread-local symbols with the original
8112 constant pool symbol. */
8113 if (TARGET_XCOFF
8114 && GET_CODE (y) == SYMBOL_REF
8115 && CONSTANT_POOL_ADDRESS_P (y)
8116 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8117 return orig_x;
8118 #endif
8120 if (offset != NULL_RTX)
8121 y = gen_rtx_PLUS (Pmode, y, offset);
8122 if (!MEM_P (orig_x))
8123 return y;
8124 else
8125 return replace_equiv_address_nv (orig_x, y);
8128 if (TARGET_MACHO
8129 && GET_CODE (orig_x) == LO_SUM
8130 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8132 y = XEXP (XEXP (orig_x, 1), 0);
8133 if (GET_CODE (y) == UNSPEC
8134 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8135 return XVECEXP (y, 0, 0);
8138 return orig_x;
8141 /* Return true if X shouldn't be emitted into the debug info.
8142 The linker doesn't like .toc section references from
8143 .debug_* sections, so reject .toc section symbols. */
8145 static bool
8146 rs6000_const_not_ok_for_debug_p (rtx x)
8148 if (GET_CODE (x) == SYMBOL_REF
8149 && CONSTANT_POOL_ADDRESS_P (x))
8151 rtx c = get_pool_constant (x);
8152 machine_mode cmode = get_pool_mode (x);
8153 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8154 return true;
8157 return false;
8160 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8162 static GTY(()) rtx rs6000_tls_symbol;
8163 static rtx
8164 rs6000_tls_get_addr (void)
8166 if (!rs6000_tls_symbol)
8167 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8169 return rs6000_tls_symbol;
8172 /* Construct the SYMBOL_REF for TLS GOT references. */
8174 static GTY(()) rtx rs6000_got_symbol;
8175 static rtx
8176 rs6000_got_sym (void)
8178 if (!rs6000_got_symbol)
8180 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8181 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8182 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8185 return rs6000_got_symbol;
8188 /* AIX Thread-Local Address support. */
8190 static rtx
8191 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8193 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8194 const char *name;
8195 char *tlsname;
8197 name = XSTR (addr, 0);
8198 /* Append the TLS CSECT qualifier, unless the symbol is already
8199 qualified or will be placed in the TLS private data section. */
8200 if (name[strlen (name) - 1] != ']'
8201 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8202 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8204 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8205 strcpy (tlsname, name);
8206 strcat (tlsname,
8207 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8208 tlsaddr = copy_rtx (addr);
8209 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8211 else
8212 tlsaddr = addr;
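/* E.g. (illustrative): a public symbol "foo" is rewritten here as
   "foo[TL]", while one with a BSS initializer becomes "foo[UL]". */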
8214 /* Place addr into TOC constant pool. */
8215 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8217 /* Output the TOC entry and create the MEM referencing the value. */
8218 if (constant_pool_expr_p (XEXP (sym, 0))
8219 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8221 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8222 mem = gen_const_mem (Pmode, tocref);
8223 set_mem_alias_set (mem, get_TOC_alias_set ());
8225 else
8226 return sym;
8228 /* Use global-dynamic for local-dynamic. */
8229 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8230 || model == TLS_MODEL_LOCAL_DYNAMIC)
8232 /* Create new TOC reference for @m symbol. */
8233 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8234 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8235 strcpy (tlsname, "*LCM");
8236 strcat (tlsname, name + 3);
8237 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8238 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8239 tocref = create_TOC_reference (modaddr, NULL_RTX);
8240 rtx modmem = gen_const_mem (Pmode, tocref);
8241 set_mem_alias_set (modmem, get_TOC_alias_set ());
8243 rtx modreg = gen_reg_rtx (Pmode);
8244 emit_insn (gen_rtx_SET (modreg, modmem));
8246 tmpreg = gen_reg_rtx (Pmode);
8247 emit_insn (gen_rtx_SET (tmpreg, mem));
8249 dest = gen_reg_rtx (Pmode);
8250 if (TARGET_32BIT)
8251 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8252 else
8253 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8254 return dest;
8256 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8257 else if (TARGET_32BIT)
8259 tlsreg = gen_reg_rtx (SImode);
8260 emit_insn (gen_tls_get_tpointer (tlsreg));
8262 else
8263 tlsreg = gen_rtx_REG (DImode, 13);
8265 /* Load the TOC value into temporary register. */
8266 tmpreg = gen_reg_rtx (Pmode);
8267 emit_insn (gen_rtx_SET (tmpreg, mem));
8268 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8269 gen_rtx_MINUS (Pmode, addr, tlsreg));
8271 /* Add TOC symbol value to TLS pointer. */
8272 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8274 return dest;
8277 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8278 this (thread-local) address. */
8280 static rtx
8281 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8283 rtx dest, insn;
8285 if (TARGET_XCOFF)
8286 return rs6000_legitimize_tls_address_aix (addr, model);
8288 dest = gen_reg_rtx (Pmode);
8289 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8291 rtx tlsreg;
8293 if (TARGET_64BIT)
8295 tlsreg = gen_rtx_REG (Pmode, 13);
8296 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8298 else
8300 tlsreg = gen_rtx_REG (Pmode, 2);
8301 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8303 emit_insn (insn);
8305 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8307 rtx tlsreg, tmp;
8309 tmp = gen_reg_rtx (Pmode);
8310 if (TARGET_64BIT)
8312 tlsreg = gen_rtx_REG (Pmode, 13);
8313 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8315 else
8317 tlsreg = gen_rtx_REG (Pmode, 2);
8318 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8320 emit_insn (insn);
8321 if (TARGET_64BIT)
8322 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8323 else
8324 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8325 emit_insn (insn);
8327 else
8329 rtx r3, got, tga, tmp1, tmp2, call_insn;
8331 /* We currently use relocations like @got@tlsgd for tls, which
8332 means the linker will handle allocation of tls entries, placing
8333 them in the .got section. So use a pointer to the .got section,
8334 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8335 or to secondary GOT sections used by 32-bit -fPIC. */
8336 if (TARGET_64BIT)
8337 got = gen_rtx_REG (Pmode, 2);
8338 else
8340 if (flag_pic == 1)
8341 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8342 else
8344 rtx gsym = rs6000_got_sym ();
8345 got = gen_reg_rtx (Pmode);
8346 if (flag_pic == 0)
8347 rs6000_emit_move (got, gsym, Pmode);
8348 else
8350 rtx mem, lab, last;
8352 tmp1 = gen_reg_rtx (Pmode);
8353 tmp2 = gen_reg_rtx (Pmode);
8354 mem = gen_const_mem (Pmode, tmp1);
8355 lab = gen_label_rtx ();
8356 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8357 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8358 if (TARGET_LINK_STACK)
8359 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8360 emit_move_insn (tmp2, mem);
8361 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8362 set_unique_reg_note (last, REG_EQUAL, gsym);
8367 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8369 tga = rs6000_tls_get_addr ();
8370 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8371 1, const0_rtx, Pmode);
8373 r3 = gen_rtx_REG (Pmode, 3);
8374 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8376 if (TARGET_64BIT)
8377 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8378 else
8379 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8381 else if (DEFAULT_ABI == ABI_V4)
8382 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8383 else
8384 gcc_unreachable ();
8385 call_insn = last_call_insn ();
8386 PATTERN (call_insn) = insn;
8387 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8388 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8389 pic_offset_table_rtx);
8391 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8393 tga = rs6000_tls_get_addr ();
8394 tmp1 = gen_reg_rtx (Pmode);
8395 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8396 1, const0_rtx, Pmode);
8398 r3 = gen_rtx_REG (Pmode, 3);
8399 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8401 if (TARGET_64BIT)
8402 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8403 else
8404 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8406 else if (DEFAULT_ABI == ABI_V4)
8407 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8408 else
8409 gcc_unreachable ();
8410 call_insn = last_call_insn ();
8411 PATTERN (call_insn) = insn;
8412 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8413 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8414 pic_offset_table_rtx);
8416 if (rs6000_tls_size == 16)
8418 if (TARGET_64BIT)
8419 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8420 else
8421 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8423 else if (rs6000_tls_size == 32)
8425 tmp2 = gen_reg_rtx (Pmode);
8426 if (TARGET_64BIT)
8427 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8428 else
8429 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8430 emit_insn (insn);
8431 if (TARGET_64BIT)
8432 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8433 else
8434 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8436 else
8438 tmp2 = gen_reg_rtx (Pmode);
8439 if (TARGET_64BIT)
8440 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8441 else
8442 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8443 emit_insn (insn);
8444 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8446 emit_insn (insn);
8448 else
8450 /* IE, or 64-bit offset LE. */
8451 tmp2 = gen_reg_rtx (Pmode);
8452 if (TARGET_64BIT)
8453 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8454 else
8455 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8456 emit_insn (insn);
8457 if (TARGET_64BIT)
8458 insn = gen_tls_tls_64 (dest, tmp2, addr);
8459 else
8460 insn = gen_tls_tls_32 (dest, tmp2, addr);
8461 emit_insn (insn);
8465 return dest;
8468 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8470 static bool
8471 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8473 if (GET_CODE (x) == HIGH
8474 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8475 return true;
8477 /* A TLS symbol in the TOC cannot contain a sum. */
8478 if (GET_CODE (x) == CONST
8479 && GET_CODE (XEXP (x, 0)) == PLUS
8480 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8481 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8482 return true;
8484 /* Do not place an ELF TLS symbol in the constant pool. */
8485 return TARGET_ELF && tls_referenced_p (x);
8488 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8489 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8490 can be addressed relative to the toc pointer. */
8492 static bool
8493 use_toc_relative_ref (rtx sym, machine_mode mode)
8495 return ((constant_pool_expr_p (sym)
8496 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8497 get_pool_mode (sym)))
8498 || (TARGET_CMODEL == CMODEL_MEDIUM
8499 && SYMBOL_REF_LOCAL_P (sym)
8500 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8503 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8504 replace the input X, or the original X if no replacement is called for.
8505 The output parameter *WIN is 1 if the calling macro should goto WIN,
8506 0 if it should not.
8508 For RS/6000, we wish to handle large displacements off a base
8509 register by splitting the addend across an addi/addis pair and the mem insn.
8510 This cuts the number of extra insns needed from 3 to 1.
8512 On Darwin, we use this to generate code for floating point constants.
8513 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8514 The Darwin code is inside #if TARGET_MACHO because only then are the
8515 machopic_* functions defined. */
8516 static rtx
8517 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8518 int opnum, int type,
8519 int ind_levels ATTRIBUTE_UNUSED, int *win)
8521 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8522 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8524 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8525 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8526 if (reg_offset_p
8527 && opnum == 1
8528 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8529 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8530 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8531 && TARGET_P9_VECTOR)
8532 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8533 && TARGET_P9_VECTOR)))
8534 reg_offset_p = false;
8536 /* We must recognize output that we have already generated ourselves. */
8537 if (GET_CODE (x) == PLUS
8538 && GET_CODE (XEXP (x, 0)) == PLUS
8539 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8540 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8541 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8543 if (TARGET_DEBUG_ADDR)
8545 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8546 debug_rtx (x);
8548 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8549 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8550 opnum, (enum reload_type) type);
8551 *win = 1;
8552 return x;
8555 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8556 if (GET_CODE (x) == LO_SUM
8557 && GET_CODE (XEXP (x, 0)) == HIGH)
8559 if (TARGET_DEBUG_ADDR)
8561 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8562 debug_rtx (x);
8564 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8565 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8566 opnum, (enum reload_type) type);
8567 *win = 1;
8568 return x;
8571 #if TARGET_MACHO
8572 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8573 && GET_CODE (x) == LO_SUM
8574 && GET_CODE (XEXP (x, 0)) == PLUS
8575 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8576 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8577 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8578 && machopic_operand_p (XEXP (x, 1)))
8580 /* Result of previous invocation of this function on Darwin
8581 floating point constant. */
8582 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8583 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8584 opnum, (enum reload_type) type);
8585 *win = 1;
8586 return x;
8588 #endif
8590 if (TARGET_CMODEL != CMODEL_SMALL
8591 && reg_offset_p
8592 && !quad_offset_p
8593 && small_toc_ref (x, VOIDmode))
8595 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8596 x = gen_rtx_LO_SUM (Pmode, hi, x);
8597 if (TARGET_DEBUG_ADDR)
8599 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8600 debug_rtx (x);
8602 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8603 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8604 opnum, (enum reload_type) type);
8605 *win = 1;
8606 return x;
8609 if (GET_CODE (x) == PLUS
8610 && REG_P (XEXP (x, 0))
8611 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8612 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8613 && CONST_INT_P (XEXP (x, 1))
8614 && reg_offset_p
8615 && !SPE_VECTOR_MODE (mode)
8616 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
8617 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8619 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8620 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8621 HOST_WIDE_INT high
8622 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8624 /* Check for 32-bit overflow or quad addresses with one of the
8625 four least significant bits set. */
8626 if (high + low != val
8627 || (quad_offset_p && (low & 0xf)))
8629 *win = 0;
8630 return x;
8633 /* Reload the high part into a base reg; leave the low part
8634 in the mem directly. */
8636 x = gen_rtx_PLUS (GET_MODE (x),
8637 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8638 GEN_INT (high)),
8639 GEN_INT (low));
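/* Worked example (illustrative): VAL = 0x12345678 splits into
   LOW = 0x5678 and HIGH = 0x12340000; the high part goes into the
   base reg and the low part stays in the mem. */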
8641 if (TARGET_DEBUG_ADDR)
8643 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8644 debug_rtx (x);
8646 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8647 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8648 opnum, (enum reload_type) type);
8649 *win = 1;
8650 return x;
8653 if (GET_CODE (x) == SYMBOL_REF
8654 && reg_offset_p
8655 && !quad_offset_p
8656 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8657 && !SPE_VECTOR_MODE (mode)
8658 #if TARGET_MACHO
8659 && DEFAULT_ABI == ABI_DARWIN
8660 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8661 && machopic_symbol_defined_p (x)
8662 #else
8663 && DEFAULT_ABI == ABI_V4
8664 && !flag_pic
8665 #endif
8666 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8667 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8668 without fprs.
8669 ??? Assume floating point reg based on mode? This assumption is
8670 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8671 where reload ends up doing a DFmode load of a constant from
8672 mem using two gprs. Unfortunately, at this point reload
8673 hasn't yet selected regs so poking around in reload data
8674 won't help and even if we could figure out the regs reliably,
8675 we'd still want to allow this transformation when the mem is
8676 naturally aligned. Since we say the address is good here, we
8677 can't disable offsets from LO_SUMs in mem_operand_gpr.
8678 FIXME: Allow offset from lo_sum for other modes too, when
8679 mem is sufficiently aligned.
8681 Also disallow this if the type can go in VMX/Altivec registers, since
8682 those registers do not have d-form (reg+offset) address modes. */
8683 && !reg_addr[mode].scalar_in_vmx_p
8684 && mode != TFmode
8685 && mode != TDmode
8686 && mode != IFmode
8687 && mode != KFmode
8688 && (mode != TImode || !TARGET_VSX_TIMODE)
8689 && mode != PTImode
8690 && (mode != DImode || TARGET_POWERPC64)
8691 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
8692 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
8694 #if TARGET_MACHO
8695 if (flag_pic)
8697 rtx offset = machopic_gen_offset (x);
8698 x = gen_rtx_LO_SUM (GET_MODE (x),
8699 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
8700 gen_rtx_HIGH (Pmode, offset)), offset);
8702 else
8703 #endif
8704 x = gen_rtx_LO_SUM (GET_MODE (x),
8705 gen_rtx_HIGH (Pmode, x), x);
8707 if (TARGET_DEBUG_ADDR)
8709 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
8710 debug_rtx (x);
8712 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8713 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8714 opnum, (enum reload_type) type);
8715 *win = 1;
8716 return x;
8719 /* Reload an offset address wrapped by an AND that represents the
8720 masking of the lower bits. Strip the outer AND and let reload
8721 convert the offset address into an indirect address. For VSX,
8722 force reload to create the address with an AND in a separate
8723 register, because we can't guarantee an altivec register will
8724 be used. */
8725 if (VECTOR_MEM_ALTIVEC_P (mode)
8726 && GET_CODE (x) == AND
8727 && GET_CODE (XEXP (x, 0)) == PLUS
8728 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8729 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8730 && GET_CODE (XEXP (x, 1)) == CONST_INT
8731 && INTVAL (XEXP (x, 1)) == -16)
8733 x = XEXP (x, 0);
8734 *win = 1;
8735 return x;
8738 if (TARGET_TOC
8739 && reg_offset_p
8740 && !quad_offset_p
8741 && GET_CODE (x) == SYMBOL_REF
8742 && use_toc_relative_ref (x, mode))
8744 x = create_TOC_reference (x, NULL_RTX);
8745 if (TARGET_CMODEL != CMODEL_SMALL)
8747 if (TARGET_DEBUG_ADDR)
8749 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
8750 debug_rtx (x);
8752 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8753 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8754 opnum, (enum reload_type) type);
8756 *win = 1;
8757 return x;
8759 *win = 0;
8760 return x;
8763 /* Debug version of rs6000_legitimize_reload_address. */
8764 static rtx
8765 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
8766 int opnum, int type,
8767 int ind_levels, int *win)
8769 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
8770 ind_levels, win);
8771 fprintf (stderr,
8772 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
8773 "type = %d, ind_levels = %d, win = %d, original addr:\n",
8774 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
8775 debug_rtx (x);
8777 if (x == ret)
8778 fprintf (stderr, "Same address returned\n");
8779 else if (!ret)
8780 fprintf (stderr, "NULL returned\n");
8781 else
8783 fprintf (stderr, "New address:\n");
8784 debug_rtx (ret);
8787 return ret;
8790 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8791 that is a valid memory address for an instruction.
8792 The MODE argument is the machine mode for the MEM expression
8793 that wants to use this address.
8795 On the RS/6000, there are four valid kinds of address: a SYMBOL_REF that
8796 refers to a constant pool entry of an address (or the sum of it
8797 plus a constant), a short (16-bit signed) constant plus a register,
8798 the sum of two registers, or a register indirect, possibly with an
8799 auto-increment. For DFmode, DDmode and DImode with a constant plus
8800 register, we must ensure that both words are addressable, or on
8801 PowerPC64 that the offset is word aligned.
8803 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8804 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8805 because adjacent memory cells are accessed by adding word-sized offsets
8806 during assembly output. */
8807 static bool
8808 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8810 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8811 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
8813 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8814 if (VECTOR_MEM_ALTIVEC_P (mode)
8815 && GET_CODE (x) == AND
8816 && GET_CODE (XEXP (x, 1)) == CONST_INT
8817 && INTVAL (XEXP (x, 1)) == -16)
8818 x = XEXP (x, 0);
8820 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8821 return 0;
8822 if (legitimate_indirect_address_p (x, reg_ok_strict))
8823 return 1;
8824 if (TARGET_UPDATE
8825 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8826 && mode_supports_pre_incdec_p (mode)
8827 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8828 return 1;
8829 /* Handle restricted vector d-form offsets in ISA 3.0. */
8830 if (quad_offset_p)
8832 if (quad_address_p (x, mode, reg_ok_strict))
8833 return 1;
8835 else if (virtual_stack_registers_memory_p (x))
8836 return 1;
8838 else if (reg_offset_p)
8840 if (legitimate_small_data_p (mode, x))
8841 return 1;
8842 if (legitimate_constant_pool_address_p (x, mode,
8843 reg_ok_strict || lra_in_progress))
8844 return 1;
8845 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
8846 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
8847 return 1;
8850 /* For TImode, if we have load/store quad and TImode in VSX registers, only
8851 allow register indirect addresses. This will allow the values to go in
8852 either GPRs or VSX registers without reloading. The vector types would
8853 tend to go into VSX registers, so we allow REG+REG, while TImode seems
8854 somewhat split, in that some uses are GPR based, and some VSX based. */
8855 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
8856 return 0;
8857 /* If not REG_OK_STRICT (before reload), allow any stack offset. */
8858 if (! reg_ok_strict
8859 && reg_offset_p
8860 && GET_CODE (x) == PLUS
8861 && GET_CODE (XEXP (x, 0)) == REG
8862 && (XEXP (x, 0) == virtual_stack_vars_rtx
8863 || XEXP (x, 0) == arg_pointer_rtx)
8864 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8865 return 1;
8866 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8867 return 1;
8868 if (!FLOAT128_2REG_P (mode)
8869 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8870 || TARGET_POWERPC64
8871 || (mode != DFmode && mode != DDmode)
8872 || (TARGET_E500_DOUBLE && mode != DDmode))
8873 && (TARGET_POWERPC64 || mode != DImode)
8874 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8875 && mode != PTImode
8876 && !avoiding_indexed_address_p (mode)
8877 && legitimate_indexed_address_p (x, reg_ok_strict))
8878 return 1;
8879 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8880 && mode_supports_pre_modify_p (mode)
8881 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8882 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8883 reg_ok_strict, false)
8884 || (!avoiding_indexed_address_p (mode)
8885 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8886 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8887 return 1;
8888 if (reg_offset_p && !quad_offset_p
8889 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8890 return 1;
8891 return 0;
8894 /* Debug version of rs6000_legitimate_address_p. */
8895 static bool
8896 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8897 bool reg_ok_strict)
8899 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8900 fprintf (stderr,
8901 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8902 "strict = %d, reload = %s, code = %s\n",
8903 ret ? "true" : "false",
8904 GET_MODE_NAME (mode),
8905 reg_ok_strict,
8906 (reload_completed
8907 ? "after"
8908 : (reload_in_progress ? "progress" : "before")),
8909 GET_RTX_NAME (GET_CODE (x)));
8910 debug_rtx (x);
8912 return ret;
8915 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8917 static bool
8918 rs6000_mode_dependent_address_p (const_rtx addr,
8919 addr_space_t as ATTRIBUTE_UNUSED)
8921 return rs6000_mode_dependent_address_ptr (addr);
8924 /* Go to LABEL if ADDR (a legitimate address expression)
8925 has an effect that depends on the machine mode it is used for.
8927 On the RS/6000 this is true of all integral offsets (since AltiVec
8928 and VSX modes don't allow them) and of pre-increment or decrement addresses.
8930 ??? Except that due to conceptual problems in offsettable_address_p
8931 we can't really report the problems of integral offsets. So leave
8932 this assuming that the adjustable offset must be valid for the
8933 sub-words of a TFmode operand, which is what we had before. */
8935 static bool
8936 rs6000_mode_dependent_address (const_rtx addr)
8938 switch (GET_CODE (addr))
8940 case PLUS:
8941 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8942 is considered a legitimate address before reload, so there
8943 are no offset restrictions in that case. Note that this
8944 condition is safe in strict mode because any address involving
8945 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8946 been rejected as illegitimate. */
8947 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8948 && XEXP (addr, 0) != arg_pointer_rtx
8949 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
8951 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
8952 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
8954 break;
8956 case LO_SUM:
8957 /* Anything in the constant pool is sufficiently aligned that
8958 all bytes have the same high part address. */
8959 return !legitimate_constant_pool_address_p (addr, QImode, false);
8961 /* Auto-increment cases are now treated generically in recog.c. */
8962 case PRE_MODIFY:
8963 return TARGET_UPDATE;
8965 /* AND is only allowed in Altivec loads. */
8966 case AND:
8967 return true;
8969 default:
8970 break;
8973 return false;
8976 /* Debug version of rs6000_mode_dependent_address. */
8977 static bool
8978 rs6000_debug_mode_dependent_address (const_rtx addr)
8980 bool ret = rs6000_mode_dependent_address (addr);
8982 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
8983 ret ? "true" : "false");
8984 debug_rtx (addr);
8986 return ret;
8989 /* Implement FIND_BASE_TERM. */
8991 rtx
8992 rs6000_find_base_term (rtx op)
8994 rtx base;
8996 base = op;
8997 if (GET_CODE (base) == CONST)
8998 base = XEXP (base, 0);
8999 if (GET_CODE (base) == PLUS)
9000 base = XEXP (base, 0);
9001 if (GET_CODE (base) == UNSPEC)
9002 switch (XINT (base, 1))
9004 case UNSPEC_TOCREL:
9005 case UNSPEC_MACHOPIC_OFFSET:
9006 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9007 for aliasing purposes. */
9008 return XVECEXP (base, 0, 0);
9011 return op;
9014 /* More elaborate version of recog's offsettable_memref_p predicate
9015 that works around the ??? note of rs6000_mode_dependent_address.
9016 In particular it accepts
9018 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9020 in 32-bit mode, which the recog predicate rejects. */
9022 static bool
9023 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9025 bool worst_case;
9027 if (!MEM_P (op))
9028 return false;
9030 /* First mimic offsettable_memref_p. */
9031 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9032 return true;
9034 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9035 the latter predicate knows nothing about the mode of the memory
9036 reference and, therefore, assumes that it is the largest supported
9037 mode (TFmode). As a consequence, legitimate offsettable memory
9038 references are rejected. rs6000_legitimate_offset_address_p contains
9039 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9040 at least with a little bit of help here given that we know the
9041 actual registers used. */
9042 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9043 || GET_MODE_SIZE (reg_mode) == 4);
9044 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9045 true, worst_case);
9048 /* Determine the reassociation width to be used in reassociate_bb.
9049 This takes into account how many parallel operations we
9050 can actually do of a given type, and also the latency.
9052 int add/sub 6/cycle
9053 mul 2/cycle
9054 vect add/sub/mul 2/cycle
9055 fp add/sub/mul 2/cycle
9056 dfp 1/cycle
9059 static int
9060 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9061 enum machine_mode mode)
9063 switch (rs6000_cpu)
9065 case PROCESSOR_POWER8:
9066 case PROCESSOR_POWER9:
9067 if (DECIMAL_FLOAT_MODE_P (mode))
9068 return 1;
9069 if (VECTOR_MODE_P (mode))
9070 return 4;
9071 if (INTEGRAL_MODE_P (mode))
9072 return opc == MULT_EXPR ? 4 : 6;
9073 if (FLOAT_MODE_P (mode))
9074 return 4;
9075 break;
9076 default:
9077 break;
9079 return 1;
9082 /* Change register usage conditional on target flags. */
9083 static void
9084 rs6000_conditional_register_usage (void)
9086 int i;
9088 if (TARGET_DEBUG_TARGET)
9089 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9091 /* Set MQ register fixed (already call_used) so that it will not be
9092 allocated. */
9093 fixed_regs[64] = 1;
9095 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9096 if (TARGET_64BIT)
9097 fixed_regs[13] = call_used_regs[13]
9098 = call_really_used_regs[13] = 1;
9100 /* Conditionally disable FPRs. */
9101 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
9102 for (i = 32; i < 64; i++)
9103 fixed_regs[i] = call_used_regs[i]
9104 = call_really_used_regs[i] = 1;
9106 /* The TOC register is not killed across calls in a way that is
9107 visible to the compiler. */
9108 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9109 call_really_used_regs[2] = 0;
9111 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9112 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9114 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9115 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9116 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9117 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9119 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9120 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9121 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9122 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9124 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9125 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9126 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9128 if (TARGET_SPE)
9130 global_regs[SPEFSCR_REGNO] = 1;
9131 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
9132 registers in prologues and epilogues. We no longer use r14
9133 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
9134 pool for link-compatibility with older versions of GCC. Once
9135 "old" code has died out, we can return r14 to the allocation
9136 pool. */
9137 fixed_regs[14]
9138 = call_used_regs[14]
9139 = call_really_used_regs[14] = 1;
9142 if (!TARGET_ALTIVEC && !TARGET_VSX)
9144 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9145 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9146 call_really_used_regs[VRSAVE_REGNO] = 1;
9149 if (TARGET_ALTIVEC || TARGET_VSX)
9150 global_regs[VSCR_REGNO] = 1;
9152 if (TARGET_ALTIVEC_ABI)
9154 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9155 call_used_regs[i] = call_really_used_regs[i] = 1;
9157 /* AIX reserves VR20:31 in non-extended ABI mode. */
9158 if (TARGET_XCOFF)
9159 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9160 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9165 /* Output insns to set DEST equal to the constant SOURCE as a series of
9166 lis, ori and shl instructions and return TRUE. */
9168 bool
9169 rs6000_emit_set_const (rtx dest, rtx source)
9171 machine_mode mode = GET_MODE (dest);
9172 rtx temp, set;
9173 rtx_insn *insn;
9174 HOST_WIDE_INT c;
9176 gcc_checking_assert (CONST_INT_P (source));
9177 c = INTVAL (source);
9178 switch (mode)
9180 case QImode:
9181 case HImode:
9182 emit_insn (gen_rtx_SET (dest, source));
9183 return true;
9185 case SImode:
9186 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9188 emit_insn (gen_rtx_SET (copy_rtx (temp),
9189 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9190 emit_insn (gen_rtx_SET (dest,
9191 gen_rtx_IOR (SImode, copy_rtx (temp),
9192 GEN_INT (c & 0xffff))));
9193 break;
9195 case DImode:
9196 if (!TARGET_POWERPC64)
9198 rtx hi, lo;
9200 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9201 DImode);
9202 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9203 DImode);
9204 emit_move_insn (hi, GEN_INT (c >> 32));
9205 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9206 emit_move_insn (lo, GEN_INT (c));
9208 else
9209 rs6000_emit_set_long_const (dest, c);
9210 break;
9212 default:
9213 gcc_unreachable ();
9216 insn = get_last_insn ();
9217 set = single_set (insn);
9218 if (! CONSTANT_P (SET_SRC (set)))
9219 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9221 return true;
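/* Host-side sketch (plain C, no RTL; not part of GCC) of the SImode
   split above: the constant is materialized as its high half (what
   the lis provides) IORed with its low 16 bits (what the ori adds).
   For 0x12345678 that is lis 0x1234 followed by ori 0x5678.  */
static unsigned int
si_const_in_two_insns (unsigned int c)
{
  unsigned int hi = c & ~0xffffu;   /* lis part */
  unsigned int lo = c & 0xffffu;    /* ori part */
  return hi | lo;                   /* == c by construction */
}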
9224 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9225 Output insns to set DEST equal to the constant C as a series of
9226 lis, ori and shl instructions. */
9228 static void
9229 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9231 rtx temp;
9232 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9234 ud1 = c & 0xffff;
9235 c = c >> 16;
9236 ud2 = c & 0xffff;
9237 c = c >> 16;
9238 ud3 = c & 0xffff;
9239 c = c >> 16;
9240 ud4 = c & 0xffff;
9242 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9243 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9244 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9246 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9247 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9249 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9251 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9252 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9253 if (ud1 != 0)
9254 emit_move_insn (dest,
9255 gen_rtx_IOR (DImode, copy_rtx (temp),
9256 GEN_INT (ud1)));
9258 else if (ud3 == 0 && ud4 == 0)
9260 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9262 gcc_assert (ud2 & 0x8000);
9263 emit_move_insn (copy_rtx (temp),
9264 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9265 if (ud1 != 0)
9266 emit_move_insn (copy_rtx (temp),
9267 gen_rtx_IOR (DImode, copy_rtx (temp),
9268 GEN_INT (ud1)));
9269 emit_move_insn (dest,
9270 gen_rtx_ZERO_EXTEND (DImode,
9271 gen_lowpart (SImode,
9272 copy_rtx (temp))));
9274 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9275 || (ud4 == 0 && ! (ud3 & 0x8000)))
9277 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9279 emit_move_insn (copy_rtx (temp),
9280 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9281 if (ud2 != 0)
9282 emit_move_insn (copy_rtx (temp),
9283 gen_rtx_IOR (DImode, copy_rtx (temp),
9284 GEN_INT (ud2)));
9285 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9286 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9287 GEN_INT (16)));
9288 if (ud1 != 0)
9289 emit_move_insn (dest,
9290 gen_rtx_IOR (DImode, copy_rtx (temp),
9291 GEN_INT (ud1)));
9293 else
9295 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9297 emit_move_insn (copy_rtx (temp),
9298 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9299 if (ud3 != 0)
9300 emit_move_insn (copy_rtx (temp),
9301 gen_rtx_IOR (DImode, copy_rtx (temp),
9302 GEN_INT (ud3)));
9304 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9305 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9306 GEN_INT (32)));
9307 if (ud2 != 0)
9308 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9309 gen_rtx_IOR (DImode, copy_rtx (temp),
9310 GEN_INT (ud2 << 16)));
9311 if (ud1 != 0)
9312 emit_move_insn (dest,
9313 gen_rtx_IOR (DImode, copy_rtx (temp),
9314 GEN_INT (ud1)));
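/* Worked example (hypothetical constant; plain host C, not GCC code)
   of the decomposition above: for c = 0x1234567890abcdef the chunks
   are ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x90ab and ud1 = 0xcdef,
   giving the general five-insn sequence lis/ori/sldi 32/oris/ori.  */
static unsigned long long
rebuild_from_ud_chunks (unsigned long long c)
{
  unsigned long long ud1 = c & 0xffff;
  unsigned long long ud2 = (c >> 16) & 0xffff;
  unsigned long long ud3 = (c >> 32) & 0xffff;
  unsigned long long ud4 = (c >> 48) & 0xffff;
  /* Reassemble in the order the emitter uses: lis ud4, ori ud3,
     shift left 32, oris ud2, ori ud1.  Returns c by construction.  */
  return ((((ud4 << 16) | ud3) << 32) | (ud2 << 16)) | ud1;
}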
9318 /* Helper for the following. Get rid of [r+r] memory refs
9319 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9321 static void
9322 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9324 if (reload_in_progress)
9325 return;
9327 if (GET_CODE (operands[0]) == MEM
9328 && GET_CODE (XEXP (operands[0], 0)) != REG
9329 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9330 GET_MODE (operands[0]), false))
9331 operands[0]
9332 = replace_equiv_address (operands[0],
9333 copy_addr_to_reg (XEXP (operands[0], 0)));
9335 if (GET_CODE (operands[1]) == MEM
9336 && GET_CODE (XEXP (operands[1], 0)) != REG
9337 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9338 GET_MODE (operands[1]), false))
9339 operands[1]
9340 = replace_equiv_address (operands[1],
9341 copy_addr_to_reg (XEXP (operands[1], 0)));
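/* Illustrative effect (hypothetical operands): a TImode move whose
   destination is (mem:TI (plus (reg r3) (reg r4))) has the [r+r] sum
   copied into a fresh base register first, leaving (mem:TI (reg tmp)),
   a form the multi-word move patterns can offset word by word.  */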
9344 /* Generate a vector of constants to permute MODE for a little-endian
9345 storage operation by swapping the two halves of a vector. */
9346 static rtvec
9347 rs6000_const_vec (machine_mode mode)
9349 int i, subparts;
9350 rtvec v;
9352 switch (mode)
9354 case V1TImode:
9355 subparts = 1;
9356 break;
9357 case V2DFmode:
9358 case V2DImode:
9359 subparts = 2;
9360 break;
9361 case V4SFmode:
9362 case V4SImode:
9363 subparts = 4;
9364 break;
9365 case V8HImode:
9366 subparts = 8;
9367 break;
9368 case V16QImode:
9369 subparts = 16;
9370 break;
9371 default:
9372 gcc_unreachable ();
9375 v = rtvec_alloc (subparts);
9377 for (i = 0; i < subparts / 2; ++i)
9378 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9379 for (i = subparts / 2; i < subparts; ++i)
9380 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9382 return v;
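/* Sketch (plain host C, not GCC code) of the selector pattern built
   above: the two halves of the vector swap places.  For subparts = 4
   the indices are 2 3 0 1; for subparts = 16 they are 8..15 followed
   by 0..7.  */
static void
fill_swap_indices (int *idx, int subparts)
{
  int i;
  for (i = 0; i < subparts / 2; ++i)
    idx[i] = i + subparts / 2;
  for (i = subparts / 2; i < subparts; ++i)
    idx[i] = i - subparts / 2;
}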
9385 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
9386 for a VSX load or store operation. */
9387 rtx
9388 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
9390 /* Use ROTATE instead of VEC_SELECT on IEEE 128-bit floating point, and
9391 128-bit integers if they are allowed in VSX registers. */
9392 if (FLOAT128_VECTOR_P (mode) || mode == TImode)
9393 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
9394 else
9396 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9397 return gen_rtx_VEC_SELECT (mode, source, par);
9401 /* Emit a little-endian load from vector memory location SOURCE to VSX
9402 register DEST in mode MODE. The load is done with two permuting
9403 insns that represent an lxvd2x and xxpermdi. */
9404 void
9405 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9407 rtx tmp, permute_mem, permute_reg;
9409 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9410 V1TImode). */
9411 if (mode == TImode || mode == V1TImode)
9413 mode = V2DImode;
9414 dest = gen_lowpart (V2DImode, dest);
9415 source = adjust_address (source, V2DImode, 0);
9418 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9419 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
9420 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
9421 emit_insn (gen_rtx_SET (tmp, permute_mem));
9422 emit_insn (gen_rtx_SET (dest, permute_reg));
9425 /* Emit a little-endian store to vector memory location DEST from VSX
9426 register SOURCE in mode MODE. The store is done with two permuting
9427 insns that represent an xxpermdi and an stxvd2x. */
9428 void
9429 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9431 rtx tmp, permute_src, permute_tmp;
9433 /* This should never be called during or after reload, because it does
9434 not re-permute the source register. It is intended only for use
9435 during expand. */
9436 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
9438 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9439 V1TImode). */
9440 if (mode == TImode || mode == V1TImode)
9442 mode = V2DImode;
9443 dest = adjust_address (dest, V2DImode, 0);
9444 source = gen_lowpart (V2DImode, source);
9447 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9448 permute_src = rs6000_gen_le_vsx_permute (source, mode);
9449 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
9450 emit_insn (gen_rtx_SET (tmp, permute_src));
9451 emit_insn (gen_rtx_SET (dest, permute_tmp));
9454 /* Emit a sequence representing a little-endian VSX load or store,
9455 moving data from SOURCE to DEST in mode MODE. This is done
9456 separately from rs6000_emit_move to ensure it is called only
9457 during expand. LE VSX loads and stores introduced later are
9458 handled with a split. The expand-time RTL generation allows
9459 us to optimize away redundant pairs of register-permutes. */
9460 void
9461 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9463 gcc_assert (!BYTES_BIG_ENDIAN
9464 && VECTOR_MEM_VSX_P (mode)
9465 && !TARGET_P9_VECTOR
9466 && !gpr_or_gpr_p (dest, source)
9467 && (MEM_P (source) ^ MEM_P (dest)));
9469 if (MEM_P (source))
9471 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9472 rs6000_emit_le_vsx_load (dest, source, mode);
9474 else
9476 if (!REG_P (source))
9477 source = force_reg (mode, source);
9478 rs6000_emit_le_vsx_store (dest, source, mode);
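/* Illustrative payoff: a value stored through the helper above and
   immediately reloaded produces back-to-back register permutes; the
   later swap-optimization pass can cancel such redundant pairs, which
   is why these moves are expanded early rather than split after
   reload.  */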
9482 /* Emit a move from SOURCE to DEST in mode MODE. */
9483 void
9484 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9486 rtx operands[2];
9487 operands[0] = dest;
9488 operands[1] = source;
9490 if (TARGET_DEBUG_ADDR)
9492 fprintf (stderr,
9493 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
9494 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9495 GET_MODE_NAME (mode),
9496 reload_in_progress,
9497 reload_completed,
9498 can_create_pseudo_p ());
9499 debug_rtx (dest);
9500 fprintf (stderr, "source:\n");
9501 debug_rtx (source);
9504 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
9505 if (CONST_WIDE_INT_P (operands[1])
9506 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9508 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9509 gcc_unreachable ();
9512 /* Check if GCC is setting up a block move that will end up using FP
9513 registers as temporaries. We must make sure this is acceptable. */
9514 if (GET_CODE (operands[0]) == MEM
9515 && GET_CODE (operands[1]) == MEM
9516 && mode == DImode
9517 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
9518 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
9519 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
9520 ? 32 : MEM_ALIGN (operands[0])))
9521 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
9522 ? 32
9523 : MEM_ALIGN (operands[1]))))
9524 && ! MEM_VOLATILE_P (operands [0])
9525 && ! MEM_VOLATILE_P (operands [1]))
9527 emit_move_insn (adjust_address (operands[0], SImode, 0),
9528 adjust_address (operands[1], SImode, 0));
9529 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9530 adjust_address (copy_rtx (operands[1]), SImode, 4));
9531 return;
9534 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9535 && !gpc_reg_operand (operands[1], mode))
9536 operands[1] = force_reg (mode, operands[1]);
9538 /* Recognize the case where operand[1] is a reference to thread-local
9539 data and load its address to a register. */
9540 if (tls_referenced_p (operands[1]))
9542 enum tls_model model;
9543 rtx tmp = operands[1];
9544 rtx addend = NULL;
9546 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9548 addend = XEXP (XEXP (tmp, 0), 1);
9549 tmp = XEXP (XEXP (tmp, 0), 0);
9552 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9553 model = SYMBOL_REF_TLS_MODEL (tmp);
9554 gcc_assert (model != 0);
9556 tmp = rs6000_legitimize_tls_address (tmp, model);
9557 if (addend)
9559 tmp = gen_rtx_PLUS (mode, tmp, addend);
9560 tmp = force_operand (tmp, operands[0]);
9562 operands[1] = tmp;
9565 /* Handle the case where reload calls us with an invalid address. */
9566 if (reload_in_progress && mode == Pmode
9567 && (! general_operand (operands[1], mode)
9568 || ! nonimmediate_operand (operands[0], mode)))
9569 goto emit_set;
9571 /* 128-bit constant floating-point values on Darwin should really be loaded
9572 as two parts. However, this premature splitting is a problem when DFmode
9573 values can go into Altivec registers. */
9574 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9575 && GET_CODE (operands[1]) == CONST_DOUBLE)
9577 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9578 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9579 DFmode);
9580 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9581 GET_MODE_SIZE (DFmode)),
9582 simplify_gen_subreg (DFmode, operands[1], mode,
9583 GET_MODE_SIZE (DFmode)),
9584 DFmode);
9585 return;
9588 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
9589 cfun->machine->sdmode_stack_slot =
9590 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
9593 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9594 p1:SD) if p1 is not of floating point class and p0 is spilled as
9595 we can have no analogous movsd_store for this. */
9596 if (lra_in_progress && mode == DDmode
9597 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9598 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9599 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9600 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9602 enum reg_class cl;
9603 int regno = REGNO (SUBREG_REG (operands[1]));
9605 if (regno >= FIRST_PSEUDO_REGISTER)
9607 cl = reg_preferred_class (regno);
9608 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9610 if (regno >= 0 && ! FP_REGNO_P (regno))
9612 mode = SDmode;
9613 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9614 operands[1] = SUBREG_REG (operands[1]);
9617 if (lra_in_progress
9618 && mode == SDmode
9619 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9620 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9621 && (REG_P (operands[1])
9622 || (GET_CODE (operands[1]) == SUBREG
9623 && REG_P (SUBREG_REG (operands[1])))))
9625 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
9626 ? SUBREG_REG (operands[1]) : operands[1]);
9627 enum reg_class cl;
9629 if (regno >= FIRST_PSEUDO_REGISTER)
9631 cl = reg_preferred_class (regno);
9632 gcc_assert (cl != NO_REGS);
9633 regno = ira_class_hard_regs[cl][0];
9635 if (FP_REGNO_P (regno))
9637 if (GET_MODE (operands[0]) != DDmode)
9638 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9639 emit_insn (gen_movsd_store (operands[0], operands[1]));
9641 else if (INT_REGNO_P (regno))
9642 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9643 else
9644 gcc_unreachable ();
9645 return;
9647 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9648 p:DD)) if p0 is not of floating point class and p1 is spilled as
9649 we can have no analogous movsd_load for this. */
9650 if (lra_in_progress && mode == DDmode
9651 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
9652 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9653 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
9654 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9656 enum reg_class cl;
9657 int regno = REGNO (SUBREG_REG (operands[0]));
9659 if (regno >= FIRST_PSEUDO_REGISTER)
9661 cl = reg_preferred_class (regno);
9662 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9664 if (regno >= 0 && ! FP_REGNO_P (regno))
9666 mode = SDmode;
9667 operands[0] = SUBREG_REG (operands[0]);
9668 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9671 if (lra_in_progress
9672 && mode == SDmode
9673 && (REG_P (operands[0])
9674 || (GET_CODE (operands[0]) == SUBREG
9675 && REG_P (SUBREG_REG (operands[0]))))
9676 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
9677 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9679 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
9680 ? SUBREG_REG (operands[0]) : operands[0]);
9681 enum reg_class cl;
9683 if (regno >= FIRST_PSEUDO_REGISTER)
9685 cl = reg_preferred_class (regno);
9686 gcc_assert (cl != NO_REGS);
9687 regno = ira_class_hard_regs[cl][0];
9689 if (FP_REGNO_P (regno))
9691 if (GET_MODE (operands[1]) != DDmode)
9692 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9693 emit_insn (gen_movsd_load (operands[0], operands[1]));
9695 else if (INT_REGNO_P (regno))
9696 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9697 else
9698 gcc_unreachable ();
9699 return;
9702 if (reload_in_progress
9703 && mode == SDmode
9704 && cfun->machine->sdmode_stack_slot != NULL_RTX
9705 && MEM_P (operands[0])
9706 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
9707 && REG_P (operands[1]))
9709 if (FP_REGNO_P (REGNO (operands[1])))
9711 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
9712 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9713 emit_insn (gen_movsd_store (mem, operands[1]));
9715 else if (INT_REGNO_P (REGNO (operands[1])))
9717 rtx mem = operands[0];
9718 if (BYTES_BIG_ENDIAN)
9719 mem = adjust_address_nv (mem, mode, 4);
9720 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9721 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
9723 else
9724 gcc_unreachable ();
9725 return;
9727 if (reload_in_progress
9728 && mode == SDmode
9729 && REG_P (operands[0])
9730 && MEM_P (operands[1])
9731 && cfun->machine->sdmode_stack_slot != NULL_RTX
9732 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
9734 if (FP_REGNO_P (REGNO (operands[0])))
9736 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
9737 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9738 emit_insn (gen_movsd_load (operands[0], mem));
9740 else if (INT_REGNO_P (REGNO (operands[0])))
9742 rtx mem = operands[1];
9743 if (BYTES_BIG_ENDIAN)
9744 mem = adjust_address_nv (mem, mode, 4);
9745 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
9746 emit_insn (gen_movsd_hardfloat (operands[0], mem));
9748 else
9749 gcc_unreachable ();
9750 return;
9753 /* FIXME: In the long term, this switch statement should go away
9754 and be replaced by a sequence of tests based on things like
9755 mode == Pmode. */
9756 switch (mode)
9758 case HImode:
9759 case QImode:
9760 if (CONSTANT_P (operands[1])
9761 && GET_CODE (operands[1]) != CONST_INT)
9762 operands[1] = force_const_mem (mode, operands[1]);
9763 break;
9765 case TFmode:
9766 case TDmode:
9767 case IFmode:
9768 case KFmode:
9769 if (FLOAT128_2REG_P (mode))
9770 rs6000_eliminate_indexed_memrefs (operands);
9771 /* fall through */
9773 case DFmode:
9774 case DDmode:
9775 case SFmode:
9776 case SDmode:
9777 if (CONSTANT_P (operands[1])
9778 && ! easy_fp_constant (operands[1], mode))
9779 operands[1] = force_const_mem (mode, operands[1]);
9780 break;
9782 case V16QImode:
9783 case V8HImode:
9784 case V4SFmode:
9785 case V4SImode:
9786 case V4HImode:
9787 case V2SFmode:
9788 case V2SImode:
9789 case V1DImode:
9790 case V2DFmode:
9791 case V2DImode:
9792 case V1TImode:
9793 if (CONSTANT_P (operands[1])
9794 && !easy_vector_constant (operands[1], mode))
9795 operands[1] = force_const_mem (mode, operands[1]);
9796 break;
9798 case SImode:
9799 case DImode:
9800 /* Use default pattern for address of ELF small data */
9801 if (TARGET_ELF
9802 && mode == Pmode
9803 && DEFAULT_ABI == ABI_V4
9804 && (GET_CODE (operands[1]) == SYMBOL_REF
9805 || GET_CODE (operands[1]) == CONST)
9806 && small_data_operand (operands[1], mode))
9808 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9809 return;
9812 if (DEFAULT_ABI == ABI_V4
9813 && mode == Pmode && mode == SImode
9814 && flag_pic == 1 && got_operand (operands[1], mode))
9816 emit_insn (gen_movsi_got (operands[0], operands[1]));
9817 return;
9820 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9821 && TARGET_NO_TOC
9822 && ! flag_pic
9823 && mode == Pmode
9824 && CONSTANT_P (operands[1])
9825 && GET_CODE (operands[1]) != HIGH
9826 && GET_CODE (operands[1]) != CONST_INT)
9828 rtx target = (!can_create_pseudo_p ()
9829 ? operands[0]
9830 : gen_reg_rtx (mode));
9832 /* If this is a function address on -mcall-aixdesc,
9833 convert it to the address of the descriptor. */
9834 if (DEFAULT_ABI == ABI_AIX
9835 && GET_CODE (operands[1]) == SYMBOL_REF
9836 && XSTR (operands[1], 0)[0] == '.')
9838 const char *name = XSTR (operands[1], 0);
9839 rtx new_ref;
9840 while (*name == '.')
9841 name++;
9842 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9843 CONSTANT_POOL_ADDRESS_P (new_ref)
9844 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9845 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9846 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9847 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9848 operands[1] = new_ref;
9851 if (DEFAULT_ABI == ABI_DARWIN)
9853 #if TARGET_MACHO
9854 if (MACHO_DYNAMIC_NO_PIC_P)
9856 /* Take care of any required data indirection. */
9857 operands[1] = rs6000_machopic_legitimize_pic_address (
9858 operands[1], mode, operands[0]);
9859 if (operands[0] != operands[1])
9860 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9861 return;
9863 #endif
9864 emit_insn (gen_macho_high (target, operands[1]));
9865 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9866 return;
9869 emit_insn (gen_elf_high (target, operands[1]));
9870 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9871 return;
9874 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9875 and we have put it in the TOC, we just need to make a TOC-relative
9876 reference to it. */
9877 if (TARGET_TOC
9878 && GET_CODE (operands[1]) == SYMBOL_REF
9879 && use_toc_relative_ref (operands[1], mode))
9880 operands[1] = create_TOC_reference (operands[1], operands[0]);
9881 else if (mode == Pmode
9882 && CONSTANT_P (operands[1])
9883 && GET_CODE (operands[1]) != HIGH
9884 && ((GET_CODE (operands[1]) != CONST_INT
9885 && ! easy_fp_constant (operands[1], mode))
9886 || (GET_CODE (operands[1]) == CONST_INT
9887 && (num_insns_constant (operands[1], mode)
9888 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9889 || (GET_CODE (operands[0]) == REG
9890 && FP_REGNO_P (REGNO (operands[0]))))
9891 && !toc_relative_expr_p (operands[1], false)
9892 && (TARGET_CMODEL == CMODEL_SMALL
9893 || can_create_pseudo_p ()
9894 || (REG_P (operands[0])
9895 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9898 #if TARGET_MACHO
9899 /* Darwin uses a special PIC legitimizer. */
9900 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9902 operands[1] =
9903 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9904 operands[0]);
9905 if (operands[0] != operands[1])
9906 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9907 return;
9909 #endif
9911 /* If we are to limit the number of things we put in the TOC and
9912 this is a symbol plus a constant we can add in one insn,
9913 just put the symbol in the TOC and add the constant. Don't do
9914 this if reload is in progress. */
9915 if (GET_CODE (operands[1]) == CONST
9916 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
9917 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9918 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9919 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9920 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
9921 && ! side_effects_p (operands[0]))
9923 rtx sym =
9924 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9925 rtx other = XEXP (XEXP (operands[1], 0), 1);
9927 sym = force_reg (mode, sym);
9928 emit_insn (gen_add3_insn (operands[0], sym, other));
9929 return;
9932 operands[1] = force_const_mem (mode, operands[1]);
9934 if (TARGET_TOC
9935 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
9936 && constant_pool_expr_p (XEXP (operands[1], 0))
9937 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
9938 get_pool_constant (XEXP (operands[1], 0)),
9939 get_pool_mode (XEXP (operands[1], 0))))
9941 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9942 operands[0]);
9943 operands[1] = gen_const_mem (mode, tocref);
9944 set_mem_alias_set (operands[1], get_TOC_alias_set ());
9947 break;
9949 case TImode:
9950 if (!VECTOR_MEM_VSX_P (TImode))
9951 rs6000_eliminate_indexed_memrefs (operands);
9952 break;
9954 case PTImode:
9955 rs6000_eliminate_indexed_memrefs (operands);
9956 break;
9958 default:
9959 fatal_insn ("bad move", gen_rtx_SET (dest, source));
9962 /* Above, we may have called force_const_mem which may have returned
9963 an invalid address. If we can, fix this up; otherwise, reload will
9964 have to deal with it. */
9965 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
9966 operands[1] = validize_mem (operands[1]);
9968 emit_set:
9969 emit_insn (gen_rtx_SET (operands[0], operands[1]));
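/* Example flows through the routine above (hypothetical operands): an
   SImode CONST_INT falls through to emit_set and is left for the movsi
   pattern to handle or split; a DFmode CONST_DOUBLE that is not an
   easy constant is forced into the constant pool and loaded from
   memory instead.  */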
9972 /* Return true if a structure, union or array containing FIELD should be
9973 accessed using `BLKmode'.
9975 For the SPE, simd types are V2SI, and gcc can be tempted to put the
9976 entire thing in a DI and use subregs to access the internals.
9977 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
9978 back-end. Because a single GPR can hold a V2SI, but not a DI, the
9979 best thing to do is set structs to BLKmode and avoid Severe Tire
9980 Damage.
9982 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
9983 fit into 1, whereas DI still needs two. */
9985 static bool
9986 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
9988 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
9989 || (TARGET_E500_DOUBLE && mode == DFmode));
9992 /* Nonzero if we can use a floating-point register to pass this arg. */
9993 #define USE_FP_FOR_ARG_P(CUM,MODE) \
9994 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
9995 && (CUM)->fregno <= FP_ARG_MAX_REG \
9996 && TARGET_HARD_FLOAT && TARGET_FPRS)
9998 /* Nonzero if we can use an AltiVec register to pass this arg. */
9999 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10000 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10001 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10002 && TARGET_ALTIVEC_ABI \
10003 && (NAMED))
10005 /* Walk down the type tree of TYPE counting consecutive base elements.
10006 If *MODEP is VOIDmode, then set it to the first valid floating point
10007 or vector type. If a non-floating point or vector type is found, or
10008 if a floating point or vector type that doesn't match a non-VOIDmode
10009 *MODEP is found, then return -1, otherwise return the count in the
10010 sub-tree. */
10012 static int
10013 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10015 machine_mode mode;
10016 HOST_WIDE_INT size;
10018 switch (TREE_CODE (type))
10020 case REAL_TYPE:
10021 mode = TYPE_MODE (type);
10022 if (!SCALAR_FLOAT_MODE_P (mode))
10023 return -1;
10025 if (*modep == VOIDmode)
10026 *modep = mode;
10028 if (*modep == mode)
10029 return 1;
10031 break;
10033 case COMPLEX_TYPE:
10034 mode = TYPE_MODE (TREE_TYPE (type));
10035 if (!SCALAR_FLOAT_MODE_P (mode))
10036 return -1;
10038 if (*modep == VOIDmode)
10039 *modep = mode;
10041 if (*modep == mode)
10042 return 2;
10044 break;
10046 case VECTOR_TYPE:
10047 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10048 return -1;
10050 /* Use V4SImode as representative of all 128-bit vector types. */
10051 size = int_size_in_bytes (type);
10052 switch (size)
10054 case 16:
10055 mode = V4SImode;
10056 break;
10057 default:
10058 return -1;
10061 if (*modep == VOIDmode)
10062 *modep = mode;
10064 /* Vector modes are considered to be opaque: two vectors are
10065 equivalent for the purposes of being homogeneous aggregates
10066 if they are the same size. */
10067 if (*modep == mode)
10068 return 1;
10070 break;
10072 case ARRAY_TYPE:
10074 int count;
10075 tree index = TYPE_DOMAIN (type);
10077 /* Can't handle incomplete types nor sizes that are not
10078 fixed. */
10079 if (!COMPLETE_TYPE_P (type)
10080 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10081 return -1;
10083 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10084 if (count == -1
10085 || !index
10086 || !TYPE_MAX_VALUE (index)
10087 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10088 || !TYPE_MIN_VALUE (index)
10089 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10090 || count < 0)
10091 return -1;
10093 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10094 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10096 /* There must be no padding. */
10097 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10098 return -1;
10100 return count;
10103 case RECORD_TYPE:
10105 int count = 0;
10106 int sub_count;
10107 tree field;
10109 /* Can't handle incomplete types nor sizes that are not
10110 fixed. */
10111 if (!COMPLETE_TYPE_P (type)
10112 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10113 return -1;
10115 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10117 if (TREE_CODE (field) != FIELD_DECL)
10118 continue;
10120 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10121 if (sub_count < 0)
10122 return -1;
10123 count += sub_count;
10126 /* There must be no padding. */
10127 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10128 return -1;
10130 return count;
10133 case UNION_TYPE:
10134 case QUAL_UNION_TYPE:
10136 /* These aren't very interesting except in a degenerate case. */
10137 int count = 0;
10138 int sub_count;
10139 tree field;
10141 /* Can't handle incomplete types nor sizes that are not
10142 fixed. */
10143 if (!COMPLETE_TYPE_P (type)
10144 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10145 return -1;
10147 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10149 if (TREE_CODE (field) != FIELD_DECL)
10150 continue;
10152 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10153 if (sub_count < 0)
10154 return -1;
10155 count = count > sub_count ? count : sub_count;
10158 /* There must be no padding. */
10159 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10160 return -1;
10162 return count;
10165 default:
10166 break;
10169 return -1;
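/* Hypothetical inputs illustrating the walk above (assuming AltiVec
   is enabled):
   struct { double x, y; } counts 2, *modep = DFmode
   _Complex double counts 2, *modep = DFmode
   double a[3] counts 3, *modep = DFmode
   struct { float f; double d; } returns -1 (element modes differ)
   union { vector int v; double d[2]; } returns -1 likewise  */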
10172 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10173 float or vector aggregate that shall be passed in FP/vector registers
10174 according to the ELFv2 ABI, return the homogeneous element mode in
10175 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10177 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10179 static bool
10180 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10181 machine_mode *elt_mode,
10182 int *n_elts)
10184 /* Note that we do not accept complex types at the top level as
10185 homogeneous aggregates; these types are handled via the
10186 targetm.calls.split_complex_arg mechanism. Complex types
10187 can be elements of homogeneous aggregates, however. */
10188 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
10190 machine_mode field_mode = VOIDmode;
10191 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10193 if (field_count > 0)
10195 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
10196 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
10198 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10199 up to AGGR_ARG_NUM_REG registers. */
10200 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
10202 if (elt_mode)
10203 *elt_mode = field_mode;
10204 if (n_elts)
10205 *n_elts = field_count;
10206 return true;
10211 if (elt_mode)
10212 *elt_mode = mode;
10213 if (n_elts)
10214 *n_elts = 1;
10215 return false;
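/* Worked example (hypothetical type): for struct { double d[4]; } the
   walker reports field_mode = DFmode and field_count = 4; each DFmode
   element needs one register and 4 <= AGGR_ARG_NUM_REG, so under
   ELFv2 the struct is a homogeneous aggregate passed in four FPRs.  */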
10218 /* Return a nonzero value to say to return the function value in
10219 memory, just as large structures are always returned. TYPE will be
10220 the data type of the value, and FNTYPE will be the type of the
10221 function doing the returning, or @code{NULL} for libcalls.
10223 The AIX ABI for the RS/6000 specifies that all structures are
10224 returned in memory. The Darwin ABI does the same.
10226 For the Darwin 64 Bit ABI, a function result can be returned in
10227 registers or in memory, depending on the size of the return data
10228 type. If it is returned in registers, the value occupies the same
10229 registers as it would if it were the first and only function
10230 argument. Otherwise, the function places its result in memory at
10231 the location pointed to by GPR3.
10233 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10234 but a draft put them in memory, and GCC used to implement the draft
10235 instead of the final standard. Therefore, aix_struct_return
10236 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10237 compatibility can change DRAFT_V4_STRUCT_RET to override the
10238 default, and -m switches get the final word. See
10239 rs6000_option_override_internal for more details.
10241 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10242 long double support is enabled. These values are returned in memory.
10244 int_size_in_bytes returns -1 for variable size objects, which go in
10245 memory always. The cast to unsigned makes -1 > 8. */
10247 static bool
10248 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10250 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10251 if (TARGET_MACHO
10252 && rs6000_darwin64_abi
10253 && TREE_CODE (type) == RECORD_TYPE
10254 && int_size_in_bytes (type) > 0)
10256 CUMULATIVE_ARGS valcum;
10257 rtx valret;
10259 valcum.words = 0;
10260 valcum.fregno = FP_ARG_MIN_REG;
10261 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10262 /* Do a trial code generation as if this were going to be passed
10263 as an argument; if any part goes in memory, we return NULL. */
10264 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10265 if (valret)
10266 return false;
10267 /* Otherwise fall through to more conventional ABI rules. */
10270 /* The ELFv2 ABI returns homogeneous FP/vector aggregates in registers. */
10271 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10272 NULL, NULL))
10273 return false;
10275 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10276 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10277 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10278 return false;
10280 if (AGGREGATE_TYPE_P (type)
10281 && (aix_struct_return
10282 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10283 return true;
10285 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10286 modes only exist for GCC vector types if -maltivec. */
10287 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10288 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10289 return false;
10291 /* Return synthetic vectors in memory. */
10292 if (TREE_CODE (type) == VECTOR_TYPE
10293 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10295 static bool warned_for_return_big_vectors = false;
10296 if (!warned_for_return_big_vectors)
10298 warning (0, "GCC vector returned by reference: "
10299 "non-standard ABI extension with no compatibility guarantee");
10300 warned_for_return_big_vectors = true;
10302 return true;
10305 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10306 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10307 return true;
10309 return false;
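/* Illustrative outcomes under ELFv2 (hypothetical types): a 16-byte
   struct of ints is returned in GPRs; a 24-byte one goes to memory;
   struct { double d[4]; } comes back in FPRs as a homogeneous
   aggregate even though it exceeds 16 bytes.  */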
10312 /* Specify whether values returned in registers should be at the most
10313 significant end of a register. We want aggregates returned by
10314 value to match the way aggregates are passed to functions. */
10316 static bool
10317 rs6000_return_in_msb (const_tree valtype)
10319 return (DEFAULT_ABI == ABI_ELFv2
10320 && BYTES_BIG_ENDIAN
10321 && AGGREGATE_TYPE_P (valtype)
10322 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
10325 #ifdef HAVE_AS_GNU_ATTRIBUTE
10326 /* Return TRUE if a call to function FNDECL may be one that
10327 potentially affects the function calling ABI of the object file. */
10329 static bool
10330 call_ABI_of_interest (tree fndecl)
10332 if (symtab->state == EXPANSION)
10334 struct cgraph_node *c_node;
10336 /* Libcalls are always interesting. */
10337 if (fndecl == NULL_TREE)
10338 return true;
10340 /* Any call to an external function is interesting. */
10341 if (DECL_EXTERNAL (fndecl))
10342 return true;
10344 /* Interesting functions that we are emitting in this object file. */
10345 c_node = cgraph_node::get (fndecl);
10346 c_node = c_node->ultimate_alias_target ();
10347 return !c_node->only_called_directly_p ();
10349 return false;
10351 #endif
10353 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10354 for a call to a function whose data type is FNTYPE.
10355 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10357 For incoming args we set the number of arguments in the prototype large
10358 so we never return a PARALLEL. */
10360 void
10361 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10362 rtx libname ATTRIBUTE_UNUSED, int incoming,
10363 int libcall, int n_named_args,
10364 tree fndecl ATTRIBUTE_UNUSED,
10365 machine_mode return_mode ATTRIBUTE_UNUSED)
10367 static CUMULATIVE_ARGS zero_cumulative;
10369 *cum = zero_cumulative;
10370 cum->words = 0;
10371 cum->fregno = FP_ARG_MIN_REG;
10372 cum->vregno = ALTIVEC_ARG_MIN_REG;
10373 cum->prototype = (fntype && prototype_p (fntype));
10374 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10375 ? CALL_LIBCALL : CALL_NORMAL);
10376 cum->sysv_gregno = GP_ARG_MIN_REG;
10377 cum->stdarg = stdarg_p (fntype);
10378 cum->libcall = libcall;
10380 cum->nargs_prototype = 0;
10381 if (incoming || cum->prototype)
10382 cum->nargs_prototype = n_named_args;
10384 /* Check for a longcall attribute. */
10385 if ((!fntype && rs6000_default_long_calls)
10386 || (fntype
10387 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10388 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10389 cum->call_cookie |= CALL_LONG;
10391 if (TARGET_DEBUG_ARG)
10393 fprintf (stderr, "\ninit_cumulative_args:");
10394 if (fntype)
10396 tree ret_type = TREE_TYPE (fntype);
10397 fprintf (stderr, " ret code = %s,",
10398 get_tree_code_name (TREE_CODE (ret_type)));
10401 if (cum->call_cookie & CALL_LONG)
10402 fprintf (stderr, " longcall,");
10404 fprintf (stderr, " proto = %d, nargs = %d\n",
10405 cum->prototype, cum->nargs_prototype);
10408 #ifdef HAVE_AS_GNU_ATTRIBUTE
10409 if (DEFAULT_ABI == ABI_V4)
10411 cum->escapes = call_ABI_of_interest (fndecl);
10412 if (cum->escapes)
10414 tree return_type;
10416 if (fntype)
10418 return_type = TREE_TYPE (fntype);
10419 return_mode = TYPE_MODE (return_type);
10421 else
10422 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10424 if (return_type != NULL)
10426 if (TREE_CODE (return_type) == RECORD_TYPE
10427 && TYPE_TRANSPARENT_AGGR (return_type))
10429 return_type = TREE_TYPE (first_field (return_type));
10430 return_mode = TYPE_MODE (return_type);
10432 if (AGGREGATE_TYPE_P (return_type)
10433 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10434 <= 8))
10435 rs6000_returns_struct = true;
10437 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (return_mode))
10438 rs6000_passes_float = true;
10439 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
10440 || SPE_VECTOR_MODE (return_mode))
10441 rs6000_passes_vector = true;
10444 #endif
10446 if (fntype
10447 && !TARGET_ALTIVEC
10448 && TARGET_ALTIVEC_ABI
10449 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10451 error ("cannot return value in vector register because"
10452 " altivec instructions are disabled, use -maltivec"
10453 " to enable them");
10457 /* The mode the ABI uses for a word. This is not the same as word_mode
10458 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10460 static machine_mode
10461 rs6000_abi_word_mode (void)
10463 return TARGET_32BIT ? SImode : DImode;
10466 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10467 static char *
10468 rs6000_offload_options (void)
10470 if (TARGET_64BIT)
10471 return xstrdup ("-foffload-abi=lp64");
10472 else
10473 return xstrdup ("-foffload-abi=ilp32");
10476 /* On rs6000, function arguments are promoted, as are function return
10477 values. */
10479 static machine_mode
10480 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10481 machine_mode mode,
10482 int *punsignedp ATTRIBUTE_UNUSED,
10483 const_tree, int)
10485 PROMOTE_MODE (mode, *punsignedp, type);
10487 return mode;
10490 /* Return true if TYPE must be passed on the stack and not in registers. */
10492 static bool
10493 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10495 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10496 return must_pass_in_stack_var_size (mode, type);
10497 else
10498 return must_pass_in_stack_var_size_or_pad (mode, type);
10501 static inline bool
10502 is_complex_IBM_long_double (machine_mode mode)
10504 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
10507 /* Whether ABI_V4 passes MODE args to a function in floating point
10508 registers. */
10510 static bool
10511 abi_v4_pass_in_fpr (machine_mode mode)
10513 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
10514 return false;
10515 if (TARGET_SINGLE_FLOAT && mode == SFmode)
10516 return true;
10517 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
10518 return true;
10519 /* ABI_V4 passes complex IBM long double in 8 gprs.
10520 Stupid, but we can't change the ABI now. */
10521 if (is_complex_IBM_long_double (mode))
10522 return false;
10523 if (FLOAT128_2REG_P (mode))
10524 return true;
10525 if (DECIMAL_FLOAT_MODE_P (mode))
10526 return true;
10527 return false;
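/* Summary by example (assuming hard float with single/double support):
   SFmode and DFmode go in FPRs; IBM extended TFmode and _Decimal128
   TDmode qualify via FLOAT128_2REG_P and DECIMAL_FLOAT_MODE_P; complex
   IBM long double (ICmode/TCmode) stays in GPRs, as noted above.  */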
10530 /* If defined, a C expression which determines whether, and in which
10531 direction, to pad out an argument with extra space. The value
10532 should be of type `enum direction': either `upward' to pad above
10533 the argument, `downward' to pad below, or `none' to inhibit
10534 padding.
10536 For the AIX ABI structs are always stored left shifted in their
10537 argument slot. */
10539 enum direction
10540 function_arg_padding (machine_mode mode, const_tree type)
10542 #ifndef AGGREGATE_PADDING_FIXED
10543 #define AGGREGATE_PADDING_FIXED 0
10544 #endif
10545 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10546 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10547 #endif
10549 if (!AGGREGATE_PADDING_FIXED)
10551 /* GCC used to pass structures of the same size as integer types as
10552 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
10553 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10554 passed padded downward, except that -mstrict-align further
10555 muddied the water in that multi-component structures of 2 and 4
10556 bytes in size were passed padded upward.
10558 The following arranges for best compatibility with previous
10559 versions of gcc, but removes the -mstrict-align dependency. */
10560 if (BYTES_BIG_ENDIAN)
10562 HOST_WIDE_INT size = 0;
10564 if (mode == BLKmode)
10566 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10567 size = int_size_in_bytes (type);
10569 else
10570 size = GET_MODE_SIZE (mode);
10572 if (size == 1 || size == 2 || size == 4)
10573 return downward;
10575 return upward;
10578 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10580 if (type != 0 && AGGREGATE_TYPE_P (type))
10581 return upward;
10584 /* Fall back to the default. */
10585 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
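/* Worked examples (big-endian, !AGGREGATE_PADDING_FIXED): a struct of
   size 2 is padded downward, sitting in the least significant end of
   its slot like a short; a struct of size 3 or 5 is padded upward,
   left-justified in the slot.  */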
10588 /* If defined, a C expression that gives the alignment boundary, in bits,
10589 of an argument with the specified mode and type. If it is not defined,
10590 PARM_BOUNDARY is used for all arguments.
10592 V.4 wants long longs and doubles to be double word aligned. Just
10593 testing the mode size is a boneheaded way to do this as it means
10594 that other types such as complex int are also double word aligned.
10595 However, we're stuck with this because changing the ABI might break
10596 existing library interfaces.
10598 Doubleword align SPE vectors.
10599 Quadword align Altivec/VSX vectors.
10600 Quadword align large synthetic vector types. */
10602 static unsigned int
10603 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10605 machine_mode elt_mode;
10606 int n_elts;
10608 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10610 if (DEFAULT_ABI == ABI_V4
10611 && (GET_MODE_SIZE (mode) == 8
10612 || (TARGET_HARD_FLOAT
10613 && TARGET_FPRS
10614 && !is_complex_IBM_long_double (mode)
10615 && FLOAT128_2REG_P (mode))))
10616 return 64;
10617 else if (FLOAT128_VECTOR_P (mode))
10618 return 128;
10619 else if (SPE_VECTOR_MODE (mode)
10620 || (type && TREE_CODE (type) == VECTOR_TYPE
10621 && int_size_in_bytes (type) >= 8
10622 && int_size_in_bytes (type) < 16))
10623 return 64;
10624 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10625 || (type && TREE_CODE (type) == VECTOR_TYPE
10626 && int_size_in_bytes (type) >= 16))
10627 return 128;
10629 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10630 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10631 -mcompat-align-parm is used. */
10632 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10633 || DEFAULT_ABI == ABI_ELFv2)
10634 && type && TYPE_ALIGN (type) > 64)
10636 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10637 or homogeneous float/vector aggregates here. We already handled
10638 vector aggregates above, but still need to check for float here. */
10639 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10640 && !SCALAR_FLOAT_MODE_P (elt_mode));
10642 /* We used to check for BLKmode instead of the above aggregate type
10643 check. Warn when this results in any difference to the ABI. */
10644 if (aggregate_p != (mode == BLKmode))
10646 static bool warned;
10647 if (!warned && warn_psabi)
10649 warned = true;
10650 inform (input_location,
10651 "the ABI of passing aggregates with %d-byte alignment"
10652 " has changed in GCC 5",
10653 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10657 if (aggregate_p)
10658 return 128;
10661 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10662 implement the "aggregate type" check as a BLKmode check here; this
10663 means certain aggregate types are in fact not aligned. */
10664 if (TARGET_MACHO && rs6000_darwin64_abi
10665 && mode == BLKmode
10666 && type && TYPE_ALIGN (type) > 64)
10667 return 128;
10669 return PARM_BOUNDARY;
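/* Illustrative results (hypothetical arguments): under ABI_V4 a DImode
   or DFmode arg returns 64; AltiVec/VSX vectors return 128; under
   ELFv2 an aggregate with 16-byte alignment returns 128; a plain int
   falls through to PARM_BOUNDARY.  */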
10672 /* The offset in words to the start of the parameter save area. */
10674 static unsigned int
10675 rs6000_parm_offset (void)
10677 return (DEFAULT_ABI == ABI_V4 ? 2
10678 : DEFAULT_ABI == ABI_ELFv2 ? 4
10679 : 6);
10682 /* For a function parm of MODE and TYPE, return the starting word in
10683 the parameter area. NWORDS of the parameter area are already used. */
10685 static unsigned int
10686 rs6000_parm_start (machine_mode mode, const_tree type,
10687 unsigned int nwords)
10689 unsigned int align;
10691 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10692 return nwords + (-(rs6000_parm_offset () + nwords) & align);
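/* Worked example (plain C sketch of the arithmetic above, not GCC
   code): under ELFv2 the save area starts 4 words into the frame.
   For a 16-byte-aligned arg (align mask 1) with 3 words already used,
   3 + (-(4 + 3) & 1) = 4, so the arg starts at word 4, where
   offset + start is even again.  */
static unsigned int
parm_start_example (unsigned int parm_offset, unsigned int nwords,
                    unsigned int align_mask)
{
  return nwords + (-(parm_offset + nwords) & align_mask);
}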
10695 /* Compute the size (in words) of a function argument. */
10697 static unsigned long
10698 rs6000_arg_size (machine_mode mode, const_tree type)
10700 unsigned long size;
10702 if (mode != BLKmode)
10703 size = GET_MODE_SIZE (mode);
10704 else
10705 size = int_size_in_bytes (type);
10707 if (TARGET_32BIT)
10708 return (size + 3) >> 2;
10709 else
10710 return (size + 7) >> 3;
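/* E.g. a 9-byte BLKmode struct occupies (9 + 3) >> 2 = 3 words on
   32-bit targets and (9 + 7) >> 3 = 2 doublewords on 64-bit ones.  */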
10713 /* Use this to flush pending int fields. */
10715 static void
10716 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10717 HOST_WIDE_INT bitpos, int final)
10719 unsigned int startbit, endbit;
10720 int intregs, intoffset;
10721 machine_mode mode;
10723 /* Handle the situations where a float is taking up the first half
10724 of the GPR, and the other half is empty (typically due to
10725 alignment restrictions). We can detect this by a 8-byte-aligned
10726 int field, or by seeing that this is the final flush for this
10727 argument. Count the word and continue on. */
10728 if (cum->floats_in_gpr == 1
10729 && (cum->intoffset % 64 == 0
10730 || (cum->intoffset == -1 && final)))
10732 cum->words++;
10733 cum->floats_in_gpr = 0;
10736 if (cum->intoffset == -1)
10737 return;
10739 intoffset = cum->intoffset;
10740 cum->intoffset = -1;
10741 cum->floats_in_gpr = 0;
10743 if (intoffset % BITS_PER_WORD != 0)
10745 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
10746 MODE_INT, 0);
10747 if (mode == BLKmode)
10749 /* We couldn't find an appropriate mode, which happens,
10750 e.g., in packed structs when there are 3 bytes to load.
10751 Back intoffset back to the beginning of the word in this
10752 case. */
10753 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10757 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10758 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10759 intregs = (endbit - startbit) / BITS_PER_WORD;
10760 cum->words += intregs;
10761 /* words should be unsigned. */
10762 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10764 int pad = (endbit/BITS_PER_WORD) - cum->words;
10765 cum->words += pad;
10769 /* The darwin64 ABI calls for us to recurse down through structs,
10770 looking for elements passed in registers. Unfortunately, we have
10771 to track int register count here also because of misalignments
10772 in powerpc alignment mode. */
10774 static void
10775 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10776 const_tree type,
10777 HOST_WIDE_INT startbitpos)
10779 tree f;
10781 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10782 if (TREE_CODE (f) == FIELD_DECL)
10784 HOST_WIDE_INT bitpos = startbitpos;
10785 tree ftype = TREE_TYPE (f);
10786 machine_mode mode;
10787 if (ftype == error_mark_node)
10788 continue;
10789 mode = TYPE_MODE (ftype);
10791 if (DECL_SIZE (f) != 0
10792 && tree_fits_uhwi_p (bit_position (f)))
10793 bitpos += int_bit_position (f);
10795 /* ??? FIXME: else assume zero offset. */
10797 if (TREE_CODE (ftype) == RECORD_TYPE)
10798 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10799 else if (USE_FP_FOR_ARG_P (cum, mode))
10801 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10802 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10803 cum->fregno += n_fpregs;
10804 /* Single-precision floats present a special problem for
10805 us, because they are smaller than an 8-byte GPR, and so
10806 the structure-packing rules combined with the standard
10807 varargs behavior mean that we want to pack float/float
10808 and float/int combinations into a single register's
10809 space. This is complicated by the arg advance flushing,
10810 which works on arbitrarily large groups of int-type
10811 fields. */
10812 if (mode == SFmode)
10814 if (cum->floats_in_gpr == 1)
10816 /* Two floats in a word; count the word and reset
10817 the float count. */
10818 cum->words++;
10819 cum->floats_in_gpr = 0;
10821 else if (bitpos % 64 == 0)
10823 /* A float at the beginning of an 8-byte word;
10824 count it and put off adjusting cum->words until
10825 we see if an arg advance flush is going to do it
10826 for us. */
10827 cum->floats_in_gpr++;
10829 else
10831 /* The float is at the end of a word, preceded
10832 by integer fields, so the arg advance flush
10833 just above has already set cum->words and
10834 everything is taken care of. */
10837 else
10838 cum->words += n_fpregs;
10840 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10842 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10843 cum->vregno++;
10844 cum->words += 2;
10846 else if (cum->intoffset == -1)
10847 cum->intoffset = bitpos;
10851 /* Check for an item that needs to be considered specially under the darwin 64
10852 bit ABI. These are record types where the mode is BLK or the structure is
10853 8 bytes in size. */
10854 static int
10855 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10857 return rs6000_darwin64_abi
10858 && ((mode == BLKmode
10859 && TREE_CODE (type) == RECORD_TYPE
10860 && int_size_in_bytes (type) > 0)
10861 || (type && TREE_CODE (type) == RECORD_TYPE
10862 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10865 /* Update the data in CUM to advance over an argument
10866 of mode MODE and data type TYPE.
10867 (TYPE is null for libcalls where that information may not be available.)
10869 Note that for args passed by reference, function_arg will be called
10870 with MODE and TYPE set to that of the pointer to the arg, not the arg
10871 itself. */
10873 static void
10874 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10875 const_tree type, bool named, int depth)
10877 machine_mode elt_mode;
10878 int n_elts;
10880 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10882 /* Only tick off an argument if we're not recursing. */
10883 if (depth == 0)
10884 cum->nargs_prototype--;
10886 #ifdef HAVE_AS_GNU_ATTRIBUTE
10887 if (DEFAULT_ABI == ABI_V4
10888 && cum->escapes)
10890 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode))
10891 rs6000_passes_float = true;
10892 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10893 rs6000_passes_vector = true;
10894 else if (SPE_VECTOR_MODE (mode)
10895 && !cum->stdarg
10896 && cum->sysv_gregno <= GP_ARG_MAX_REG)
10897 rs6000_passes_vector = true;
10899 #endif
10901 if (TARGET_ALTIVEC_ABI
10902 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10903 || (type && TREE_CODE (type) == VECTOR_TYPE
10904 && int_size_in_bytes (type) == 16)))
10906 bool stack = false;
10908 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10910 cum->vregno += n_elts;
10912 if (!TARGET_ALTIVEC)
10913 error ("cannot pass argument in vector register because"
10914 " altivec instructions are disabled, use -maltivec"
10915 " to enable them");
10917 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10918 even if it is going to be passed in a vector register.
10919 Darwin does the same for variable-argument functions. */
10920 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10921 && TARGET_64BIT)
10922 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10923 stack = true;
10925 else
10926 stack = true;
10928 if (stack)
10930 int align;
10932 /* Vector parameters must be 16-byte aligned. In 32-bit
10933 mode this means we need to take into account the offset
10934 to the parameter save area. In 64-bit mode, they just
10935 have to start on an even word, since the parameter save
10936 area is 16-byte aligned. */
10937 if (TARGET_32BIT)
10938 align = -(rs6000_parm_offset () + cum->words) & 3;
10939 else
10940 align = cum->words & 1;
10941 cum->words += align + rs6000_arg_size (mode, type);
10943 if (TARGET_DEBUG_ARG)
10945 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
10946 cum->words, align);
10947 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
10948 cum->nargs_prototype, cum->prototype,
10949 GET_MODE_NAME (mode));
10953 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
10954 && !cum->stdarg
10955 && cum->sysv_gregno <= GP_ARG_MAX_REG)
10956 cum->sysv_gregno++;
10958 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10960 int size = int_size_in_bytes (type);
10961 /* Variable sized types have size == -1 and are
10962 treated as if consisting entirely of ints.
10963 Pad to 16 byte boundary if needed. */
10964 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10965 && (cum->words % 2) != 0)
10966 cum->words++;
10967 /* For varargs, we can just go up by the size of the struct. */
10968 if (!named)
10969 cum->words += (size + 7) / 8;
10970 else
10972 /* It is tempting to say int register count just goes up by
10973 sizeof(type)/8, but this is wrong in a case such as
10974 { int; double; int; } [powerpc alignment]. We have to
10975 grovel through the fields for these too. */
10976 cum->intoffset = 0;
10977 cum->floats_in_gpr = 0;
10978 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
10979 rs6000_darwin64_record_arg_advance_flush (cum,
10980 size * BITS_PER_UNIT, 1);
10982 if (TARGET_DEBUG_ARG)
10984 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
10985 cum->words, TYPE_ALIGN (type), size);
10986 fprintf (stderr,
10987 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
10988 cum->nargs_prototype, cum->prototype,
10989 GET_MODE_NAME (mode));
10992 else if (DEFAULT_ABI == ABI_V4)
10994 if (abi_v4_pass_in_fpr (mode))
10996 /* _Decimal128 must use an even/odd register pair. This assumes
10997 that the register number is odd when fregno is odd. */
10998 if (mode == TDmode && (cum->fregno % 2) == 1)
10999 cum->fregno++;
11001 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11002 <= FP_ARG_V4_MAX_REG)
11003 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11004 else
11006 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11007 if (mode == DFmode || FLOAT128_IBM_P (mode)
11008 || mode == DDmode || mode == TDmode)
11009 cum->words += cum->words & 1;
11010 cum->words += rs6000_arg_size (mode, type);
11013 else
11015 int n_words = rs6000_arg_size (mode, type);
11016 int gregno = cum->sysv_gregno;
11018 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11019 (r7,r8) or (r9,r10). As does any other 2 word item such
11020 as complex int due to a historical mistake. */
11021 if (n_words == 2)
11022 gregno += (1 - gregno) & 1;
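/* Worked example (illustrative, not from the source): with
GP_ARG_MIN_REG == r3, "gregno += (1 - gregno) & 1" rounds an even
gregno up to the next odd one, so a 2-word item starting at r4 moves
to (r5,r6) while one already at r5 stays put, yielding exactly the
pairs (r3,r4), (r5,r6), (r7,r8) and (r9,r10). */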
11024 /* Multi-reg args are not split between registers and stack. */
11025 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11027 /* Long long and SPE vectors are aligned on the stack.
11028 So are other 2 word items such as complex int due to
11029 a historical mistake. */
11030 if (n_words == 2)
11031 cum->words += cum->words & 1;
11032 cum->words += n_words;
11035 /* Note: we continue to accumulate gregno even after we've started
11036 spilling to the stack; this is how expand_builtin_saveregs learns
11037 that spilling has begun. */
11038 cum->sysv_gregno = gregno + n_words;
11041 if (TARGET_DEBUG_ARG)
11043 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11044 cum->words, cum->fregno);
11045 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11046 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11047 fprintf (stderr, "mode = %4s, named = %d\n",
11048 GET_MODE_NAME (mode), named);
11051 else
11053 int n_words = rs6000_arg_size (mode, type);
11054 int start_words = cum->words;
11055 int align_words = rs6000_parm_start (mode, type, start_words);
11057 cum->words = align_words + n_words;
11059 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
11061 /* _Decimal128 must be passed in an even/odd float register pair.
11062 This assumes that the register number is odd when fregno is
11063 odd. */
11064 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11065 cum->fregno++;
11066 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11069 if (TARGET_DEBUG_ARG)
11071 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11072 cum->words, cum->fregno);
11073 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11074 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11075 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11076 named, align_words - start_words, depth);
11081 static void
11082 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11083 const_tree type, bool named)
11085 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11089 static rtx
11090 spe_build_register_parallel (machine_mode mode, int gregno)
11092 rtx r1, r3, r5, r7;
11094 switch (mode)
11096 case DFmode:
11097 r1 = gen_rtx_REG (DImode, gregno);
11098 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11099 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
11101 case DCmode:
11102 case TFmode:
11103 r1 = gen_rtx_REG (DImode, gregno);
11104 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11105 r3 = gen_rtx_REG (DImode, gregno + 2);
11106 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11107 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
11109 case TCmode:
11110 r1 = gen_rtx_REG (DImode, gregno);
11111 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
11112 r3 = gen_rtx_REG (DImode, gregno + 2);
11113 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
11114 r5 = gen_rtx_REG (DImode, gregno + 4);
11115 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
11116 r7 = gen_rtx_REG (DImode, gregno + 6);
11117 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
11118 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
11120 default:
11121 gcc_unreachable ();
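/* Illustrative sketch (an assumption, not from the source): for DCmode
at gregno == 5, the routine above returns roughly
(parallel:DC [(expr_list (reg:DI 5) (const_int 0))
(expr_list (reg:DI 7) (const_int 8))])
i.e. the two halves of the complex double in the GPR pairs starting at
r5 and r7, at byte offsets 0 and 8 of the argument. */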
11125 /* Determine where to put a SIMD argument on the SPE. */
11126 static rtx
11127 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
11128 const_tree type)
11130 int gregno = cum->sysv_gregno;
11132 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
11133 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
11134 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
11135 || mode == DCmode || mode == TCmode))
11137 int n_words = rs6000_arg_size (mode, type);
11139 /* Doubles go in an odd/even register pair (r5/r6, etc). */
11140 if (mode == DFmode)
11141 gregno += (1 - gregno) & 1;
11143 /* Multi-reg args are not split between registers and stack. */
11144 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11145 return NULL_RTX;
11147 return spe_build_register_parallel (mode, gregno);
11149 if (cum->stdarg)
11151 int n_words = rs6000_arg_size (mode, type);
11153 /* SPE vectors are put in odd registers. */
11154 if (n_words == 2 && (gregno & 1) == 0)
11155 gregno += 1;
11157 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
11159 rtx r1, r2;
11160 machine_mode m = SImode;
11162 r1 = gen_rtx_REG (m, gregno);
11163 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
11164 r2 = gen_rtx_REG (m, gregno + 1);
11165 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
11166 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
11168 else
11169 return NULL_RTX;
11171 else
11173 if (gregno <= GP_ARG_MAX_REG)
11174 return gen_rtx_REG (mode, gregno);
11175 else
11176 return NULL_RTX;
11180 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11181 structure between cum->intoffset and bitpos to integer registers. */
11183 static void
11184 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11185 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11187 machine_mode mode;
11188 unsigned int regno;
11189 unsigned int startbit, endbit;
11190 int this_regno, intregs, intoffset;
11191 rtx reg;
11193 if (cum->intoffset == -1)
11194 return;
11196 intoffset = cum->intoffset;
11197 cum->intoffset = -1;
11199 /* If this is the trailing part of a word, try to only load that
11200 much into the register. Otherwise load the whole register. Note
11201 that in the latter case we may pick up unwanted bits. It's not a
11202 problem at the moment, but we may wish to revisit this. */
11204 if (intoffset % BITS_PER_WORD != 0)
11206 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11207 MODE_INT, 0);
11208 if (mode == BLKmode)
11210 /* We couldn't find an appropriate mode, which happens,
11211 e.g., in packed structs when there are 3 bytes to load.
11212 Move intoffset back to the beginning of the word in this
11213 case. */
11214 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11215 mode = word_mode;
11218 else
11219 mode = word_mode;
11221 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11222 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11223 intregs = (endbit - startbit) / BITS_PER_WORD;
11224 this_regno = cum->words + intoffset / BITS_PER_WORD;
11226 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11227 cum->use_stack = 1;
11229 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11230 if (intregs <= 0)
11231 return;
11233 intoffset /= BITS_PER_UNIT;
11236 regno = GP_ARG_MIN_REG + this_regno;
11237 reg = gen_rtx_REG (mode, regno);
11238 rvec[(*k)++] =
11239 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11241 this_regno += 1;
11242 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
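/* Note for the reader (not in the original): with UNITS_PER_WORD == 8,
"(intoffset | 7) + 1" advances intoffset to the next multiple of 8,
e.g. 4 -> 8 and 8 -> 16; after the first, possibly partial, word all
remaining chunks are loaded as whole words. */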
11243 mode = word_mode;
11244 intregs -= 1;
11246 while (intregs > 0);
11249 /* Recursive workhorse for the following. */
11251 static void
11252 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11253 HOST_WIDE_INT startbitpos, rtx rvec[],
11254 int *k)
11256 tree f;
11258 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11259 if (TREE_CODE (f) == FIELD_DECL)
11261 HOST_WIDE_INT bitpos = startbitpos;
11262 tree ftype = TREE_TYPE (f);
11263 machine_mode mode;
11264 if (ftype == error_mark_node)
11265 continue;
11266 mode = TYPE_MODE (ftype);
11268 if (DECL_SIZE (f) != 0
11269 && tree_fits_uhwi_p (bit_position (f)))
11270 bitpos += int_bit_position (f);
11272 /* ??? FIXME: else assume zero offset. */
11274 if (TREE_CODE (ftype) == RECORD_TYPE)
11275 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11276 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11278 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11279 #if 0
11280 switch (mode)
11282 case SCmode: mode = SFmode; break;
11283 case DCmode: mode = DFmode; break;
11284 case TCmode: mode = TFmode; break;
11285 default: break;
11287 #endif
11288 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11289 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11291 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11292 && (mode == TFmode || mode == TDmode));
11293 /* Long double or _Decimal128 split over regs and memory. */
11294 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11295 cum->use_stack = 1;
11297 rvec[(*k)++]
11298 = gen_rtx_EXPR_LIST (VOIDmode,
11299 gen_rtx_REG (mode, cum->fregno++),
11300 GEN_INT (bitpos / BITS_PER_UNIT));
11301 if (FLOAT128_2REG_P (mode))
11302 cum->fregno++;
11304 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11306 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11307 rvec[(*k)++]
11308 = gen_rtx_EXPR_LIST (VOIDmode,
11309 gen_rtx_REG (mode, cum->vregno++),
11310 GEN_INT (bitpos / BITS_PER_UNIT));
11312 else if (cum->intoffset == -1)
11313 cum->intoffset = bitpos;
11317 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11318 the register(s) to be used for each field and subfield of a struct
11319 being passed by value, along with the offset of where the
11320 register's value may be found in the block. FP fields go in FP
11321 register, vector fields go in vector registers, and everything
11322 else goes in int registers, packed as in memory.
11324 This code is also used for function return values. RETVAL indicates
11325 whether this is the case.
11327 Much of this is taken from the SPARC V9 port, which has a similar
11328 calling convention. */
11330 static rtx
11331 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11332 bool named, bool retval)
11334 rtx rvec[FIRST_PSEUDO_REGISTER];
11335 int k = 1, kbase = 1;
11336 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11337 /* This is a copy; modifications are not visible to our caller. */
11338 CUMULATIVE_ARGS copy_cum = *orig_cum;
11339 CUMULATIVE_ARGS *cum = &copy_cum;
11341 /* Pad to 16 byte boundary if needed. */
11342 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11343 && (cum->words % 2) != 0)
11344 cum->words++;
11346 cum->intoffset = 0;
11347 cum->use_stack = 0;
11348 cum->named = named;
11350 /* Put entries into rvec[] for individual FP and vector fields, and
11351 for the chunks of memory that go in int regs. Note we start at
11352 element 1; 0 is reserved for an indication of using memory, and
11353 may or may not be filled in below. */
11354 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11355 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11357 /* If any part of the struct went on the stack put all of it there.
11358 This hack is because the generic code for
11359 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11360 parts of the struct are not at the beginning. */
11361 if (cum->use_stack)
11363 if (retval)
11364 return NULL_RTX; /* doesn't go in registers at all */
11365 kbase = 0;
11366 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11368 if (k > 1 || cum->use_stack)
11369 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11370 else
11371 return NULL_RTX;
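/* Illustrative sketch (register numbers are an assumption, not from the
source): for a named argument of type struct { double d; int x; int y; }
with FPRs and GPRs still free, the recursion plus flush above produce
something like
(parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
(expr_list (reg:DI r4) (const_int 8))])
i.e. the double in an FPR at byte offset 0 and the two ints packed into
one GPR at byte offset 8, exactly as they are laid out in memory. */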
11374 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11376 static rtx
11377 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11378 int align_words)
11380 int n_units;
11381 int i, k;
11382 rtx rvec[GP_ARG_NUM_REG + 1];
11384 if (align_words >= GP_ARG_NUM_REG)
11385 return NULL_RTX;
11387 n_units = rs6000_arg_size (mode, type);
11389 /* Optimize the simple case where the arg fits in one gpr, except in
11390 the case of BLKmode due to assign_parms assuming that registers are
11391 BITS_PER_WORD wide. */
11392 if (n_units == 0
11393 || (n_units == 1 && mode != BLKmode))
11394 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11396 k = 0;
11397 if (align_words + n_units > GP_ARG_NUM_REG)
11398 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11399 using a magic NULL_RTX component.
11400 This is not strictly correct. Only some of the arg belongs in
11401 memory, not all of it. However, the normal scheme using
11402 function_arg_partial_nregs can result in unusual subregs, eg.
11403 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11404 store the whole arg to memory is often more efficient than code
11405 to store pieces, and we know that space is available in the right
11406 place for the whole arg. */
11407 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11409 i = 0;
11412 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11413 rtx off = GEN_INT (i++ * 4);
11414 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11416 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11418 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
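/* Illustrative example (not from the source): a DFmode argument with
align_words == 7 occupies n_units == 2 but only one GPR remains, so the
PARALLEL built above is roughly
(parallel:DF [(expr_list (nil) (const_int 0))
(expr_list (reg:SI 10) (const_int 0))])
The magic NULL_RTX first element says the whole value also lives in
memory, while the second says its first 4 bytes are duplicated in r10. */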
11421 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11422 but must also be copied into the parameter save area starting at
11423 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11424 to the GPRs and/or memory. Return the number of elements used. */
11426 static int
11427 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11428 int align_words, rtx *rvec)
11430 int k = 0;
11432 if (align_words < GP_ARG_NUM_REG)
11434 int n_words = rs6000_arg_size (mode, type);
11436 if (align_words + n_words > GP_ARG_NUM_REG
11437 || mode == BLKmode
11438 || (TARGET_32BIT && TARGET_POWERPC64))
11440 /* If this is partially on the stack, then we only
11441 include the portion actually in registers here. */
11442 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11443 int i = 0;
11445 if (align_words + n_words > GP_ARG_NUM_REG)
11447 /* Not all of the arg fits in gprs. Say that it goes in memory
11448 too, using a magic NULL_RTX component. Also see comment in
11449 rs6000_mixed_function_arg for why the normal
11450 function_arg_partial_nregs scheme doesn't work in this case. */
11451 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11456 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11457 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11458 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11460 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11462 else
11464 /* The whole arg fits in gprs. */
11465 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11466 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11469 else
11471 /* It's entirely in memory. */
11472 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11475 return k;
11478 /* RVEC is a vector of K components of an argument of mode MODE.
11479 Construct the final function_arg return value from it. */
11481 static rtx
11482 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11484 gcc_assert (k >= 1);
11486 /* Avoid returning a PARALLEL in the trivial cases. */
11487 if (k == 1)
11489 if (XEXP (rvec[0], 0) == NULL_RTX)
11490 return NULL_RTX;
11492 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11493 return XEXP (rvec[0], 0);
11496 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11499 /* Determine where to put an argument to a function.
11500 Value is zero to push the argument on the stack,
11501 or a hard register in which to store the argument.
11503 MODE is the argument's machine mode.
11504 TYPE is the data type of the argument (as a tree).
11505 This is null for libcalls where that information may
11506 not be available.
11507 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11508 the preceding args and about the function being called. It is
11509 not modified in this routine.
11510 NAMED is nonzero if this argument is a named parameter
11511 (otherwise it is an extra parameter matching an ellipsis).
11513 On RS/6000 the first eight words of non-FP are normally in registers
11514 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11515 Under V.4, the first 8 FP args are in registers.
11517 If this is floating-point and no prototype is specified, we use
11518 both an FP and integer register (or possibly FP reg and stack). Library
11519 functions (when CALL_LIBCALL is set) always have the proper types for args,
11520 so we can pass the FP value just in one register. emit_library_function
11521 doesn't support PARALLEL anyway.
11523 Note that for args passed by reference, function_arg will be called
11524 with MODE and TYPE set to that of the pointer to the arg, not the arg
11525 itself. */
11527 static rtx
11528 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11529 const_tree type, bool named)
11531 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11532 enum rs6000_abi abi = DEFAULT_ABI;
11533 machine_mode elt_mode;
11534 int n_elts;
11536 /* Return a marker to indicate whether CR1 needs to set or clear the
11537 bit that V.4 uses to say fp args were passed in registers.
11538 Assume that we don't need the marker for software floating point,
11539 or compiler generated library calls. */
11540 if (mode == VOIDmode)
11542 if (abi == ABI_V4
11543 && (cum->call_cookie & CALL_LIBCALL) == 0
11544 && (cum->stdarg
11545 || (cum->nargs_prototype < 0
11546 && (cum->prototype || TARGET_NO_PROTOTYPE))))
11548 /* For the SPE, we need to crxor CR6 always. */
11549 if (TARGET_SPE_ABI)
11550 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
11551 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
11552 return GEN_INT (cum->call_cookie
11553 | ((cum->fregno == FP_ARG_MIN_REG)
11554 ? CALL_V4_SET_FP_ARGS
11555 : CALL_V4_CLEAR_FP_ARGS));
11558 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11561 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11563 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11565 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11566 if (rslt != NULL_RTX)
11567 return rslt;
11568 /* Else fall through to usual handling. */
11571 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11573 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11574 rtx r, off;
11575 int i, k = 0;
11577 /* Do we also need to pass this argument in the parameter save area?
11578 Library support functions for IEEE 128-bit are assumed to not need the
11579 value passed both in GPRs and in vector registers. */
11580 if (TARGET_64BIT && !cum->prototype
11581 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11583 int align_words = ROUND_UP (cum->words, 2);
11584 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11587 /* Describe where this argument goes in the vector registers. */
11588 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11590 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11591 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11592 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11595 return rs6000_finish_function_arg (mode, rvec, k);
11597 else if (TARGET_ALTIVEC_ABI
11598 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11599 || (type && TREE_CODE (type) == VECTOR_TYPE
11600 && int_size_in_bytes (type) == 16)))
11602 if (named || abi == ABI_V4)
11603 return NULL_RTX;
11604 else
11606 /* Vector parameters to varargs functions under AIX or Darwin
11607 get passed in memory and possibly also in GPRs. */
11608 int align, align_words, n_words;
11609 machine_mode part_mode;
11611 /* Vector parameters must be 16-byte aligned. In 32-bit
11612 mode this means we need to take into account the offset
11613 to the parameter save area. In 64-bit mode, they just
11614 have to start on an even word, since the parameter save
11615 area is 16-byte aligned. */
11616 if (TARGET_32BIT)
11617 align = -(rs6000_parm_offset () + cum->words) & 3;
11618 else
11619 align = cum->words & 1;
11620 align_words = cum->words + align;
11622 /* Out of registers? Memory, then. */
11623 if (align_words >= GP_ARG_NUM_REG)
11624 return NULL_RTX;
11626 if (TARGET_32BIT && TARGET_POWERPC64)
11627 return rs6000_mixed_function_arg (mode, type, align_words);
11629 /* The vector value goes in GPRs. Only the part of the
11630 value in GPRs is reported here. */
11631 part_mode = mode;
11632 n_words = rs6000_arg_size (mode, type);
11633 if (align_words + n_words > GP_ARG_NUM_REG)
11634 /* Fortunately, there are only two possibilities, the value
11635 is either wholly in GPRs or half in GPRs and half not. */
11636 part_mode = DImode;
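/* Illustrative note (64-bit case assumed, not from the source): a
16-byte vector at align_words == 7 has n_words == 2 but only r10 left,
so part_mode becomes DImode and only (reg:DI 10) is reported here; the
other half of the value lives in the caller's parameter save area. */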
11638 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11641 else if (TARGET_SPE_ABI && TARGET_SPE
11642 && (SPE_VECTOR_MODE (mode)
11643 || (TARGET_E500_DOUBLE && (mode == DFmode
11644 || mode == DCmode
11645 || mode == TFmode
11646 || mode == TCmode))))
11647 return rs6000_spe_function_arg (cum, mode, type);
11649 else if (abi == ABI_V4)
11651 if (abi_v4_pass_in_fpr (mode))
11653 /* _Decimal128 must use an even/odd register pair. This assumes
11654 that the register number is odd when fregno is odd. */
11655 if (mode == TDmode && (cum->fregno % 2) == 1)
11656 cum->fregno++;
11658 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11659 <= FP_ARG_V4_MAX_REG)
11660 return gen_rtx_REG (mode, cum->fregno);
11661 else
11662 return NULL_RTX;
11664 else
11666 int n_words = rs6000_arg_size (mode, type);
11667 int gregno = cum->sysv_gregno;
11669 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
11670 (r7,r8) or (r9,r10). As does any other 2 word item such
11671 as complex int due to a historical mistake. */
11672 if (n_words == 2)
11673 gregno += (1 - gregno) & 1;
11675 /* Multi-reg args are not split between registers and stack. */
11676 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11677 return NULL_RTX;
11679 if (TARGET_32BIT && TARGET_POWERPC64)
11680 return rs6000_mixed_function_arg (mode, type,
11681 gregno - GP_ARG_MIN_REG);
11682 return gen_rtx_REG (mode, gregno);
11685 else
11687 int align_words = rs6000_parm_start (mode, type, cum->words);
11689 /* _Decimal128 must be passed in an even/odd float register pair.
11690 This assumes that the register number is odd when fregno is odd. */
11691 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11692 cum->fregno++;
11694 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11696 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11697 rtx r, off;
11698 int i, k = 0;
11699 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11700 int fpr_words;
11702 /* Do we also need to pass this argument in the parameter
11703 save area? */
11704 if (type && (cum->nargs_prototype <= 0
11705 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11706 && TARGET_XL_COMPAT
11707 && align_words >= GP_ARG_NUM_REG)))
11708 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11710 /* Describe where this argument goes in the fprs. */
11711 for (i = 0; i < n_elts
11712 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11714 /* Check if the argument is split over registers and memory.
11715 This can only ever happen for long double or _Decimal128;
11716 complex types are handled via split_complex_arg. */
11717 machine_mode fmode = elt_mode;
11718 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11720 gcc_assert (FLOAT128_2REG_P (fmode));
11721 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11724 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11725 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11726 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11729 /* If there were not enough FPRs to hold the argument, the rest
11730 usually goes into memory. However, if the current position
11731 is still within the register parameter area, a portion may
11732 actually have to go into GPRs.
11734 Note that it may happen that the portion of the argument
11735 passed in the first "half" of the first GPR was already
11736 passed in the last FPR as well.
11738 For unnamed arguments, we already set up GPRs to cover the
11739 whole argument in rs6000_psave_function_arg, so there is
11740 nothing further to do at this point. */
11741 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11742 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11743 && cum->nargs_prototype > 0)
11745 static bool warned;
11747 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11748 int n_words = rs6000_arg_size (mode, type);
11750 align_words += fpr_words;
11751 n_words -= fpr_words;
11755 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11756 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11757 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11759 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11761 if (!warned && warn_psabi)
11763 warned = true;
11764 inform (input_location,
11765 "the ABI of passing homogeneous float aggregates"
11766 " has changed in GCC 5");
11770 return rs6000_finish_function_arg (mode, rvec, k);
11772 else if (align_words < GP_ARG_NUM_REG)
11774 if (TARGET_32BIT && TARGET_POWERPC64)
11775 return rs6000_mixed_function_arg (mode, type, align_words);
11777 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11779 else
11780 return NULL_RTX;
11784 /* For an arg passed partly in registers and partly in memory, this is
11785 the number of bytes passed in registers. For args passed entirely in
11786 registers or entirely in memory, zero. When an arg is described by a
11787 PARALLEL, perhaps using more than one register type, this function
11788 returns the number of bytes used by the first element of the PARALLEL. */
11790 static int
11791 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11792 tree type, bool named)
11794 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11795 bool passed_in_gprs = true;
11796 int ret = 0;
11797 int align_words;
11798 machine_mode elt_mode;
11799 int n_elts;
11801 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11803 if (DEFAULT_ABI == ABI_V4)
11804 return 0;
11806 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11808 /* If we are passing this arg in the fixed parameter save area (gprs or
11809 memory) as well as VRs, we do not use the partial bytes mechanism;
11810 instead, rs6000_function_arg will return a PARALLEL including a memory
11811 element as necessary. Library support functions for IEEE 128-bit are
11812 assumed to not need the value passed both in GPRs and in vector
11813 registers. */
11814 if (TARGET_64BIT && !cum->prototype
11815 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11816 return 0;
11818 /* Otherwise, we pass in VRs only. Check for partial copies. */
11819 passed_in_gprs = false;
11820 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11821 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11824 /* In this complicated case we just disable the partial_nregs code. */
11825 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11826 return 0;
11828 align_words = rs6000_parm_start (mode, type, cum->words);
11830 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11832 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11834 /* If we are passing this arg in the fixed parameter save area
11835 (gprs or memory) as well as FPRs, we do not use the partial
11836 bytes mechanism; instead, rs6000_function_arg will return a
11837 PARALLEL including a memory element as necessary. */
11838 if (type
11839 && (cum->nargs_prototype <= 0
11840 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11841 && TARGET_XL_COMPAT
11842 && align_words >= GP_ARG_NUM_REG)))
11843 return 0;
11845 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11846 passed_in_gprs = false;
11847 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11849 /* Compute number of bytes / words passed in FPRs. If there
11850 is still space available in the register parameter area
11851 *after* that amount, a part of the argument will be passed
11852 in GPRs. In that case, the total amount passed in any
11853 registers is equal to the amount that would have been passed
11854 in GPRs if everything were passed there, so we fall back to
11855 the GPR code below to compute the appropriate value. */
11856 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11857 * MIN (8, GET_MODE_SIZE (elt_mode)));
11858 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11860 if (align_words + fpr_words < GP_ARG_NUM_REG)
11861 passed_in_gprs = true;
11862 else
11863 ret = fpr;
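/* Worked example (illustrative, not from the source): on 64-bit ELFv2
with cum->fregno == f12, a homogeneous aggregate of four doubles needs
four FPRs but only two remain, so fpr == 2 * 8 == 16 and fpr_words == 2.
If the argument's position leaves GPR space after those two words,
passed_in_gprs stays true and the GPR computation below supplies the
total instead. */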
11867 if (passed_in_gprs
11868 && align_words < GP_ARG_NUM_REG
11869 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11870 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11872 if (ret != 0 && TARGET_DEBUG_ARG)
11873 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11875 return ret;
11878 /* A C expression that indicates when an argument must be passed by
11879 reference. If nonzero for an argument, a copy of that argument is
11880 made in memory and a pointer to the argument is passed instead of
11881 the argument itself. The pointer is passed in whatever way is
11882 appropriate for passing a pointer to that type.
11884 Under V.4, aggregates and long double are passed by reference.
11886 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11887 reference unless the AltiVec vector extension ABI is in force.
11889 As an extension to all ABIs, variable sized types are passed by
11890 reference. */
11892 static bool
11893 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11894 machine_mode mode, const_tree type,
11895 bool named ATTRIBUTE_UNUSED)
11897 if (!type)
11898 return 0;
11900 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11901 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11903 if (TARGET_DEBUG_ARG)
11904 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11905 return 1;
11908 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11910 if (TARGET_DEBUG_ARG)
11911 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11912 return 1;
11915 if (int_size_in_bytes (type) < 0)
11917 if (TARGET_DEBUG_ARG)
11918 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11919 return 1;
11922 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11923 modes only exist for GCC vector types if -maltivec. */
11924 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11926 if (TARGET_DEBUG_ARG)
11927 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11928 return 1;
11931 /* Pass synthetic vectors in memory. */
11932 if (TREE_CODE (type) == VECTOR_TYPE
11933 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11935 static bool warned_for_pass_big_vectors = false;
11936 if (TARGET_DEBUG_ARG)
11937 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11938 if (!warned_for_pass_big_vectors)
11940 warning (0, "GCC vector passed by reference: "
11941 "non-standard ABI extension with no compatibility guarantee");
11942 warned_for_pass_big_vectors = true;
11944 return 1;
11947 return 0;
11950 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11951 already processed. Return true if the parameter must be passed
11952 (fully or partially) on the stack. */
11954 static bool
11955 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11957 machine_mode mode;
11958 int unsignedp;
11959 rtx entry_parm;
11961 /* Catch errors. */
11962 if (type == NULL || type == error_mark_node)
11963 return true;
11965 /* Handle types with no storage requirement. */
11966 if (TYPE_MODE (type) == VOIDmode)
11967 return false;
11969 /* Handle complex types. */
11970 if (TREE_CODE (type) == COMPLEX_TYPE)
11971 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11972 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11974 /* Handle transparent aggregates. */
11975 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11976 && TYPE_TRANSPARENT_AGGR (type))
11977 type = TREE_TYPE (first_field (type));
11979 /* See if this arg was passed by invisible reference. */
11980 if (pass_by_reference (get_cumulative_args (args_so_far),
11981 TYPE_MODE (type), type, true))
11982 type = build_pointer_type (type);
11984 /* Find mode as it is passed by the ABI. */
11985 unsignedp = TYPE_UNSIGNED (type);
11986 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11988 /* If we must pass in stack, we need a stack. */
11989 if (rs6000_must_pass_in_stack (mode, type))
11990 return true;
11992 /* If there is no incoming register, we need a stack. */
11993 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11994 if (entry_parm == NULL)
11995 return true;
11997 /* Likewise if we need to pass both in registers and on the stack. */
11998 if (GET_CODE (entry_parm) == PARALLEL
11999 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12000 return true;
12002 /* Also true if we're partially in registers and partially not. */
12003 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12004 return true;
12006 /* Update info on where next arg arrives in registers. */
12007 rs6000_function_arg_advance (args_so_far, mode, type, true);
12008 return false;
12011 /* Return true if FUN has no prototype, has a variable argument
12012 list, or passes any parameter in memory. */
12014 static bool
12015 rs6000_function_parms_need_stack (tree fun, bool incoming)
12017 tree fntype, result;
12018 CUMULATIVE_ARGS args_so_far_v;
12019 cumulative_args_t args_so_far;
12021 if (!fun)
12022 /* Must be a libcall, all of which only use reg parms. */
12023 return false;
12025 fntype = fun;
12026 if (!TYPE_P (fun))
12027 fntype = TREE_TYPE (fun);
12029 /* Varargs functions need the parameter save area. */
12030 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12031 return true;
12033 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12034 args_so_far = pack_cumulative_args (&args_so_far_v);
12036 /* When incoming, we will have been passed the function decl.
12037 It is necessary to use the decl to handle K&R style functions,
12038 where TYPE_ARG_TYPES may not be available. */
12039 if (incoming)
12041 gcc_assert (DECL_P (fun));
12042 result = DECL_RESULT (fun);
12044 else
12045 result = TREE_TYPE (fntype);
12047 if (result && aggregate_value_p (result, fntype))
12049 if (!TYPE_P (result))
12050 result = TREE_TYPE (result);
12051 result = build_pointer_type (result);
12052 rs6000_parm_needs_stack (args_so_far, result);
12055 if (incoming)
12057 tree parm;
12059 for (parm = DECL_ARGUMENTS (fun);
12060 parm && parm != void_list_node;
12061 parm = TREE_CHAIN (parm))
12062 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12063 return true;
12065 else
12067 function_args_iterator args_iter;
12068 tree arg_type;
12070 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12071 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12072 return true;
12075 return false;
12078 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12079 usually a constant depending on the ABI. However, in the ELFv2 ABI
12080 the register parameter area is optional when calling a function that
12081 has a prototype in scope, has no variable argument list, and passes
12082 all parameters in registers. */
12084 static int
12085 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12087 int reg_parm_stack_space;
12089 switch (DEFAULT_ABI)
12091 default:
12092 reg_parm_stack_space = 0;
12093 break;
12095 case ABI_AIX:
12096 case ABI_DARWIN:
12097 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12098 break;
12100 case ABI_ELFv2:
12101 /* ??? Recomputing this every time is a bit expensive. Is there
12102 a place to cache this information? */
12103 if (rs6000_function_parms_need_stack (fun, incoming))
12104 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12105 else
12106 reg_parm_stack_space = 0;
12107 break;
12110 return reg_parm_stack_space;
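/* Illustrative example (an assumption, not from the source): under
ELFv2, a call to int f (int, double) with a prototype in scope passes
both arguments in registers, so rs6000_function_parms_need_stack
returns false and the caller need not allocate the 64-byte register
parameter save area; this function then returns 0. */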
12113 static void
12114 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12116 int i;
12117 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12119 if (nregs == 0)
12120 return;
12122 for (i = 0; i < nregs; i++)
12124 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12125 if (reload_completed)
12127 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12128 tem = NULL_RTX;
12129 else
12130 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12131 i * GET_MODE_SIZE (reg_mode));
12133 else
12134 tem = replace_equiv_address (tem, XEXP (tem, 0));
12136 gcc_assert (tem);
12138 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12142 /* Perform any actions needed for a function that is receiving a
12143 variable number of arguments.
12145 CUM is as above.
12147 MODE and TYPE are the mode and type of the current parameter.
12149 PRETEND_SIZE is a variable that should be set to the amount of stack
12150 that must be pushed by the prolog to pretend that our caller pushed it.
12153 Normally, this macro will push all remaining incoming registers on the
12154 stack and set PRETEND_SIZE to the length of the registers pushed. */
12156 static void
12157 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12158 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12159 int no_rtl)
12161 CUMULATIVE_ARGS next_cum;
12162 int reg_size = TARGET_32BIT ? 4 : 8;
12163 rtx save_area = NULL_RTX, mem;
12164 int first_reg_offset;
12165 alias_set_type set;
12167 /* Skip the last named argument. */
12168 next_cum = *get_cumulative_args (cum);
12169 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12171 if (DEFAULT_ABI == ABI_V4)
12173 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12175 if (! no_rtl)
12177 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12178 HOST_WIDE_INT offset = 0;
12180 /* Try to optimize the size of the varargs save area.
12181 The ABI requires that ap.reg_save_area is doubleword
12182 aligned, but we don't need to allocate space for all
12183 the bytes, only those to which we actually will save
12184 anything. */
12185 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12186 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12187 if (TARGET_HARD_FLOAT && TARGET_FPRS
12188 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12189 && cfun->va_list_fpr_size)
12191 if (gpr_reg_num)
12192 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12193 * UNITS_PER_FP_WORD;
12194 if (cfun->va_list_fpr_size
12195 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12196 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12197 else
12198 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12199 * UNITS_PER_FP_WORD;
12201 if (gpr_reg_num)
12203 offset = -((first_reg_offset * reg_size) & ~7);
12204 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12206 gpr_reg_num = cfun->va_list_gpr_size;
12207 if (reg_size == 4 && (first_reg_offset & 1))
12208 gpr_reg_num++;
12210 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12212 else if (fpr_size)
12213 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12214 * UNITS_PER_FP_WORD
12215 - (int) (GP_ARG_NUM_REG * reg_size);
12217 if (gpr_size + fpr_size)
12219 rtx reg_save_area
12220 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12221 gcc_assert (GET_CODE (reg_save_area) == MEM);
12222 reg_save_area = XEXP (reg_save_area, 0);
12223 if (GET_CODE (reg_save_area) == PLUS)
12225 gcc_assert (XEXP (reg_save_area, 0)
12226 == virtual_stack_vars_rtx);
12227 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12228 offset += INTVAL (XEXP (reg_save_area, 1));
12230 else
12231 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12234 cfun->machine->varargs_save_offset = offset;
12235 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12238 else
12240 first_reg_offset = next_cum.words;
12241 save_area = crtl->args.internal_arg_pointer;
12243 if (targetm.calls.must_pass_in_stack (mode, type))
12244 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12247 set = get_varargs_alias_set ();
12248 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12249 && cfun->va_list_gpr_size)
12251 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12253 if (va_list_gpr_counter_field)
12254 /* V4 va_list_gpr_size counts number of registers needed. */
12255 n_gpr = cfun->va_list_gpr_size;
12256 else
12257 /* char * va_list instead counts number of bytes needed. */
12258 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12260 if (nregs > n_gpr)
12261 nregs = n_gpr;
12263 mem = gen_rtx_MEM (BLKmode,
12264 plus_constant (Pmode, save_area,
12265 first_reg_offset * reg_size));
12266 MEM_NOTRAP_P (mem) = 1;
12267 set_mem_alias_set (mem, set);
12268 set_mem_align (mem, BITS_PER_WORD);
12270 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12271 nregs);
12274 /* Save FP registers if needed. */
12275 if (DEFAULT_ABI == ABI_V4
12276 && TARGET_HARD_FLOAT && TARGET_FPRS
12277 && ! no_rtl
12278 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12279 && cfun->va_list_fpr_size)
12281 int fregno = next_cum.fregno, nregs;
12282 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12283 rtx lab = gen_label_rtx ();
12284 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12285 * UNITS_PER_FP_WORD);
12287 emit_jump_insn
12288 (gen_rtx_SET (pc_rtx,
12289 gen_rtx_IF_THEN_ELSE (VOIDmode,
12290 gen_rtx_NE (VOIDmode, cr1,
12291 const0_rtx),
12292 gen_rtx_LABEL_REF (VOIDmode, lab),
12293 pc_rtx)));
12295 for (nregs = 0;
12296 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12297 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12299 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12300 ? DFmode : SFmode,
12301 plus_constant (Pmode, save_area, off));
12302 MEM_NOTRAP_P (mem) = 1;
12303 set_mem_alias_set (mem, set);
12304 set_mem_align (mem, GET_MODE_ALIGNMENT (
12305 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12306 ? DFmode : SFmode));
12307 emit_move_insn (mem, gen_rtx_REG (
12308 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
12309 ? DFmode : SFmode, fregno));
12312 emit_label (lab);
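/* Note on the branch above (hedged, not from the source): under the
SVR4 ABI the caller communicates in a CR1 bit whether any FP arguments
were passed in FPRs; the conditional jump lets a varargs prologue skip
the FPR stores entirely when that bit says none were, avoiding the
stores on the common integer-only call path. */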
12316 /* Create the va_list data type. */
12318 static tree
12319 rs6000_build_builtin_va_list (void)
12321 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12323 /* For AIX, prefer 'char *' because that's what the system
12324 header files like. */
12325 if (DEFAULT_ABI != ABI_V4)
12326 return build_pointer_type (char_type_node);
12328 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12329 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12330 get_identifier ("__va_list_tag"), record);
12332 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12333 unsigned_char_type_node);
12334 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12335 unsigned_char_type_node);
12336 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12337 every user file. */
12338 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12339 get_identifier ("reserved"), short_unsigned_type_node);
12340 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12341 get_identifier ("overflow_arg_area"),
12342 ptr_type_node);
12343 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12344 get_identifier ("reg_save_area"),
12345 ptr_type_node);
12347 va_list_gpr_counter_field = f_gpr;
12348 va_list_fpr_counter_field = f_fpr;
12350 DECL_FIELD_CONTEXT (f_gpr) = record;
12351 DECL_FIELD_CONTEXT (f_fpr) = record;
12352 DECL_FIELD_CONTEXT (f_res) = record;
12353 DECL_FIELD_CONTEXT (f_ovf) = record;
12354 DECL_FIELD_CONTEXT (f_sav) = record;
12356 TYPE_STUB_DECL (record) = type_decl;
12357 TYPE_NAME (record) = type_decl;
12358 TYPE_FIELDS (record) = f_gpr;
12359 DECL_CHAIN (f_gpr) = f_fpr;
12360 DECL_CHAIN (f_fpr) = f_res;
12361 DECL_CHAIN (f_res) = f_ovf;
12362 DECL_CHAIN (f_ovf) = f_sav;
12364 layout_type (record);
12366 /* The correct type is an array type of one element. */
12367 return build_array_type (record, build_index_type (size_zero_node));
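/* For illustration (derived from the fields built above, not verbatim
from any system header): the V.4 va_list behaves like

typedef struct __va_list_tag {
unsigned char gpr;               index of the next saved GPR (0..8)
unsigned char fpr;               index of the next saved FPR (0..8)
unsigned short reserved;         the named padding above
void *overflow_arg_area;         next argument on the stack
void *reg_save_area;             base of the register save block
} va_list[1];  */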
12370 /* Implement va_start. */
12372 static void
12373 rs6000_va_start (tree valist, rtx nextarg)
12375 HOST_WIDE_INT words, n_gpr, n_fpr;
12376 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12377 tree gpr, fpr, ovf, sav, t;
12379 /* Only SVR4 needs something special. */
12380 if (DEFAULT_ABI != ABI_V4)
12382 std_expand_builtin_va_start (valist, nextarg);
12383 return;
12386 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12387 f_fpr = DECL_CHAIN (f_gpr);
12388 f_res = DECL_CHAIN (f_fpr);
12389 f_ovf = DECL_CHAIN (f_res);
12390 f_sav = DECL_CHAIN (f_ovf);
12392 valist = build_simple_mem_ref (valist);
12393 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12394 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12395 f_fpr, NULL_TREE);
12396 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12397 f_ovf, NULL_TREE);
12398 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12399 f_sav, NULL_TREE);
12401 /* Count number of gp and fp argument registers used. */
12402 words = crtl->args.info.words;
12403 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12404 GP_ARG_NUM_REG);
12405 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12406 FP_ARG_NUM_REG);
12408 if (TARGET_DEBUG_ARG)
12409 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12410 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12411 words, n_gpr, n_fpr);
12413 if (cfun->va_list_gpr_size)
12415 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12416 build_int_cst (NULL_TREE, n_gpr));
12417 TREE_SIDE_EFFECTS (t) = 1;
12418 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12421 if (cfun->va_list_fpr_size)
12423 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12424 build_int_cst (NULL_TREE, n_fpr));
12425 TREE_SIDE_EFFECTS (t) = 1;
12426 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12428 #ifdef HAVE_AS_GNU_ATTRIBUTE
12429 if (call_ABI_of_interest (cfun->decl))
12430 rs6000_passes_float = true;
12431 #endif
12434 /* Find the overflow area. */
12435 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12436 if (words != 0)
12437 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12438 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12439 TREE_SIDE_EFFECTS (t) = 1;
12440 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12442 /* If there were no va_arg invocations, don't set up the register
12443 save area. */
12444 if (!cfun->va_list_gpr_size
12445 && !cfun->va_list_fpr_size
12446 && n_gpr < GP_ARG_NUM_REG
12447 && n_fpr < FP_ARG_V4_MAX_REG)
12448 return;
12450 /* Find the register save area. */
12451 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12452 if (cfun->machine->varargs_save_offset)
12453 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12454 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12455 TREE_SIDE_EFFECTS (t) = 1;
12456 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12459 /* Implement va_arg. */
12461 static tree
12462 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12463 gimple_seq *post_p)
12465 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12466 tree gpr, fpr, ovf, sav, reg, t, u;
12467 int size, rsize, n_reg, sav_ofs, sav_scale;
12468 tree lab_false, lab_over, addr;
12469 int align;
12470 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12471 int regalign = 0;
12472 gimple *stmt;
12474 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12476 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12477 return build_va_arg_indirect_ref (t);
12480 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12481 earlier version of gcc, with the property that it always applied alignment
12482 adjustments to the va-args (even for zero-sized types). The cheapest way
12483 to deal with this is to replicate the effect of the part of
12484 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12485 of relevance.
12486 We don't need to check for pass-by-reference because of the test above.
12487 We can return a simplified answer, since we know there's no offset to add. */
12489 if (((TARGET_MACHO
12490 && rs6000_darwin64_abi)
12491 || DEFAULT_ABI == ABI_ELFv2
12492 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12493 && integer_zerop (TYPE_SIZE (type)))
12495 unsigned HOST_WIDE_INT align, boundary;
12496 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12497 align = PARM_BOUNDARY / BITS_PER_UNIT;
12498 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12499 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12500 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12501 boundary /= BITS_PER_UNIT;
12502 if (boundary > align)
12504 tree t;
12505 /* This updates arg ptr by the amount that would be necessary
12506 to align the zero-sized (but not zero-alignment) item. */
12507 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12508 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12509 gimplify_and_add (t, pre_p);
12511 t = fold_convert (sizetype, valist_tmp);
12512 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12513 fold_convert (TREE_TYPE (valist),
12514 fold_build2 (BIT_AND_EXPR, sizetype, t,
12515 size_int (-boundary))));
12516 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12517 gimplify_and_add (t, pre_p);
12519 /* Since it is zero-sized there's no increment for the item itself. */
12520 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12521 return build_va_arg_indirect_ref (valist_tmp);
12524 if (DEFAULT_ABI != ABI_V4)
12526 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12528 tree elem_type = TREE_TYPE (type);
12529 machine_mode elem_mode = TYPE_MODE (elem_type);
12530 int elem_size = GET_MODE_SIZE (elem_mode);
12532 if (elem_size < UNITS_PER_WORD)
12534 tree real_part, imag_part;
12535 gimple_seq post = NULL;
12537 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12538 &post);
12539 /* Copy the value into a temporary, lest the formal temporary
12540 be reused out from under us. */
12541 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12542 gimple_seq_add_seq (pre_p, post);
12544 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12545 post_p);
12547 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12551 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12554 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12555 f_fpr = DECL_CHAIN (f_gpr);
12556 f_res = DECL_CHAIN (f_fpr);
12557 f_ovf = DECL_CHAIN (f_res);
12558 f_sav = DECL_CHAIN (f_ovf);
12560 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12561 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12562 f_fpr, NULL_TREE);
12563 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12564 f_ovf, NULL_TREE);
12565 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12566 f_sav, NULL_TREE);
12568 size = int_size_in_bytes (type);
12569 rsize = (size + 3) / 4;
12570 align = 1;
12572 machine_mode mode = TYPE_MODE (type);
12573 if (abi_v4_pass_in_fpr (mode))
12575 /* FP args go in FP registers, if present. */
12576 reg = fpr;
12577 n_reg = (size + 7) / 8;
12578 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
12579 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
12580 if (mode != SFmode && mode != SDmode)
12581 align = 8;
12583 else
12585 /* Otherwise into GP registers. */
12586 reg = gpr;
12587 n_reg = rsize;
12588 sav_ofs = 0;
12589 sav_scale = 4;
12590 if (n_reg == 2)
12591 align = 8;
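/* Worked example (illustrative, not from the source): for
va_arg (ap, double) with hard float, size == 8 gives reg == fpr,
n_reg == 1, sav_ofs == 8 * 4 == 32 (the eight 4-byte GPR slots precede
the FPRs in the save area) and sav_scale == 8, so the register copy is
read from reg_save_area + 32 + fpr * 8. */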
12594 /* Pull the value out of the saved registers.... */
12596 lab_over = NULL;
12597 addr = create_tmp_var (ptr_type_node, "addr");
12599 /* AltiVec vectors never go in registers when -mabi=altivec. */
12600 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12601 align = 16;
12602 else
12604 lab_false = create_artificial_label (input_location);
12605 lab_over = create_artificial_label (input_location);
12607 /* Long long and SPE vectors are aligned in the registers.
12608 As are any other 2 gpr item such as complex int due to a
12609 historical mistake. */
12610 u = reg;
12611 if (n_reg == 2 && reg == gpr)
12613 regalign = 1;
12614 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12615 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12616 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12617 unshare_expr (reg), u);
12619 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12620 reg number is 0 for f1, so we want to make it odd. */
12621 else if (reg == fpr && mode == TDmode)
12623 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12624 build_int_cst (TREE_TYPE (reg), 1));
12625 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12628 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12629 t = build2 (GE_EXPR, boolean_type_node, u, t);
12630 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12631 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12632 gimplify_and_add (t, pre_p);
12634 t = sav;
12635 if (sav_ofs)
12636 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12638 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12639 build_int_cst (TREE_TYPE (reg), n_reg));
12640 u = fold_convert (sizetype, u);
12641 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12642 t = fold_build_pointer_plus (t, u);
12644 /* _Decimal32 varargs are located in the second word of the 64-bit
12645 FP register for 32-bit binaries. */
12646 if (TARGET_32BIT
12647 && TARGET_HARD_FLOAT && TARGET_FPRS
12648 && mode == SDmode)
12649 t = fold_build_pointer_plus_hwi (t, size);
12651 gimplify_assign (addr, t, pre_p);
12653 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12655 stmt = gimple_build_label (lab_false);
12656 gimple_seq_add_stmt (pre_p, stmt);
12658 if ((n_reg == 2 && !regalign) || n_reg > 2)
12660 /* Ensure that we don't find any more args in regs.
12661 Alignment has taken care of the special cases. */
12662 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12666 /* ... otherwise out of the overflow area. */
12668 /* Care for on-stack alignment if needed. */
12669 t = ovf;
12670 if (align != 1)
12672 t = fold_build_pointer_plus_hwi (t, align - 1);
12673 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12674 build_int_cst (TREE_TYPE (t), -align));
12676 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12678 gimplify_assign (unshare_expr (addr), t, pre_p);
12680 t = fold_build_pointer_plus_hwi (t, size);
12681 gimplify_assign (unshare_expr (ovf), t, pre_p);
12683 if (lab_over)
12685 stmt = gimple_build_label (lab_over);
12686 gimple_seq_add_stmt (pre_p, stmt);
12689 if (STRICT_ALIGNMENT
12690 && (TYPE_ALIGN (type)
12691 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12693 /* The value (of type complex double, for example) may not be
12694 aligned in memory in the saved registers, so copy via a
12695 temporary. (This is the same code as used for SPARC.) */
12696 tree tmp = create_tmp_var (type, "va_arg_tmp");
12697 tree dest_addr = build_fold_addr_expr (tmp);
12699 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12700 3, dest_addr, addr, size_int (rsize * 4));
12702 gimplify_and_add (copy, pre_p);
12703 addr = dest_addr;
12706 addr = fold_convert (ptrtype, addr);
12707 return build_va_arg_indirect_ref (addr);
12710 /* Builtins. */
12712 static void
12713 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12715 tree t;
12716 unsigned classify = rs6000_builtin_info[(int)code].attr;
12717 const char *attr_string = "";
12719 gcc_assert (name != NULL);
12720 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12722 if (rs6000_builtin_decls[(int)code])
12723 fatal_error (input_location,
12724 "internal error: builtin function %s already processed", name);
12726 rs6000_builtin_decls[(int)code] = t =
12727 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12729 /* Set any special attributes. */
12730 if ((classify & RS6000_BTC_CONST) != 0)
12732 /* const function: the result depends only on the inputs. */
12733 TREE_READONLY (t) = 1;
12734 TREE_NOTHROW (t) = 1;
12735 attr_string = ", const";
12737 else if ((classify & RS6000_BTC_PURE) != 0)
12739 /* pure function: it may read global memory, but does not set any
12740 external state. */
12741 DECL_PURE_P (t) = 1;
12742 TREE_NOTHROW (t) = 1;
12743 attr_string = ", pure";
12745 else if ((classify & RS6000_BTC_FP) != 0)
12747 /* Function is a math function. If -frounding-math is in effect, treat
12748 the function as not reading global memory, but allow it arbitrary side
12749 effects. Otherwise, assume the function is const. This mimics the
12750 ATTR_MATHFN_FPROUNDING attribute in builtin-attrs.def that is used for
12751 the math functions. */
12752 TREE_NOTHROW (t) = 1;
12753 if (flag_rounding_math)
12755 DECL_PURE_P (t) = 1;
12756 DECL_IS_NOVOPS (t) = 1;
12757 attr_string = ", fp, pure";
12759 else
12761 TREE_READONLY (t) = 1;
12762 attr_string = ", fp, const";
12765 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12766 gcc_unreachable ();
12768 if (TARGET_DEBUG_BUILTIN)
12769 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12770 (int)code, name, attr_string);
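
/* Illustrative sketch (not part of this file): what the RS6000_BTC_CONST
   and RS6000_BTC_PURE classifications above mean at the C level.  The
   functions below are invented examples, not real builtins.  */

static int global_scale;

__attribute__ ((const)) static int
square (int x)
{
  return x * x;			/* result depends only on the argument */
}

__attribute__ ((pure)) static int
scaled (int x)
{
  return x * global_scale;	/* may read globals, writes nothing */
}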
12773 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12775 #undef RS6000_BUILTIN_0
12776 #undef RS6000_BUILTIN_1
12777 #undef RS6000_BUILTIN_2
12778 #undef RS6000_BUILTIN_3
12779 #undef RS6000_BUILTIN_A
12780 #undef RS6000_BUILTIN_D
12781 #undef RS6000_BUILTIN_E
12782 #undef RS6000_BUILTIN_H
12783 #undef RS6000_BUILTIN_P
12784 #undef RS6000_BUILTIN_Q
12785 #undef RS6000_BUILTIN_S
12786 #undef RS6000_BUILTIN_X
12788 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12789 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12790 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12791 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12792 { MASK, ICODE, NAME, ENUM },
12794 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12795 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12796 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12797 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12798 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12799 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12800 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12801 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12803 static const struct builtin_description bdesc_3arg[] =
12805 #include "rs6000-builtin.def"
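
/* Illustrative sketch (not part of this file): the X-macro technique used
   throughout this section.  rs6000-builtin.def is included once per
   table; before each inclusion exactly one RS6000_BUILTIN_* macro expands
   to a table entry and the others expand to nothing, so each table picks
   out one class of builtins.  The MY_* names below are invented for the
   example.  */

#define MY_BUILTINS \
  MY_UNARY  (MY_NEG, "neg")	\
  MY_BINARY (MY_ADD, "add")	\
  MY_BINARY (MY_SUB, "sub")

/* Build a table containing only the binary entries.  */
#define MY_UNARY(ENUM, NAME)		/* expands to nothing */
#define MY_BINARY(ENUM, NAME) { NAME },

static const struct { const char *name; } my_2arg[] =
{
  MY_BUILTINS
};

#undef MY_UNARY
#undef MY_BINARY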
12808 /* DST operations: void foo (void *, const int, const char). */
12810 #undef RS6000_BUILTIN_0
12811 #undef RS6000_BUILTIN_1
12812 #undef RS6000_BUILTIN_2
12813 #undef RS6000_BUILTIN_3
12814 #undef RS6000_BUILTIN_A
12815 #undef RS6000_BUILTIN_D
12816 #undef RS6000_BUILTIN_E
12817 #undef RS6000_BUILTIN_H
12818 #undef RS6000_BUILTIN_P
12819 #undef RS6000_BUILTIN_Q
12820 #undef RS6000_BUILTIN_S
12821 #undef RS6000_BUILTIN_X
12823 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12824 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12825 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12826 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12827 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12828 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12829 { MASK, ICODE, NAME, ENUM },
12831 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12832 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12833 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12834 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12835 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12836 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12838 static const struct builtin_description bdesc_dst[] =
12840 #include "rs6000-builtin.def"
12843 /* Simple binary operations: VECc = foo (VECa, VECb). */
12845 #undef RS6000_BUILTIN_0
12846 #undef RS6000_BUILTIN_1
12847 #undef RS6000_BUILTIN_2
12848 #undef RS6000_BUILTIN_3
12849 #undef RS6000_BUILTIN_A
12850 #undef RS6000_BUILTIN_D
12851 #undef RS6000_BUILTIN_E
12852 #undef RS6000_BUILTIN_H
12853 #undef RS6000_BUILTIN_P
12854 #undef RS6000_BUILTIN_Q
12855 #undef RS6000_BUILTIN_S
12856 #undef RS6000_BUILTIN_X
12858 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12859 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12860 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12861 { MASK, ICODE, NAME, ENUM },
12863 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12864 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12865 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12866 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12867 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12868 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12869 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12870 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12871 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12873 static const struct builtin_description bdesc_2arg[] =
12875 #include "rs6000-builtin.def"
12878 #undef RS6000_BUILTIN_0
12879 #undef RS6000_BUILTIN_1
12880 #undef RS6000_BUILTIN_2
12881 #undef RS6000_BUILTIN_3
12882 #undef RS6000_BUILTIN_A
12883 #undef RS6000_BUILTIN_D
12884 #undef RS6000_BUILTIN_E
12885 #undef RS6000_BUILTIN_H
12886 #undef RS6000_BUILTIN_P
12887 #undef RS6000_BUILTIN_Q
12888 #undef RS6000_BUILTIN_S
12889 #undef RS6000_BUILTIN_X
12891 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12892 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12893 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12894 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12895 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12896 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12897 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12898 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12899 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12900 { MASK, ICODE, NAME, ENUM },
12902 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12903 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12904 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12906 /* AltiVec predicates. */
12908 static const struct builtin_description bdesc_altivec_preds[] =
12910 #include "rs6000-builtin.def"
12913 /* SPE predicates. */
12914 #undef RS6000_BUILTIN_0
12915 #undef RS6000_BUILTIN_1
12916 #undef RS6000_BUILTIN_2
12917 #undef RS6000_BUILTIN_3
12918 #undef RS6000_BUILTIN_A
12919 #undef RS6000_BUILTIN_D
12920 #undef RS6000_BUILTIN_E
12921 #undef RS6000_BUILTIN_H
12922 #undef RS6000_BUILTIN_P
12923 #undef RS6000_BUILTIN_Q
12924 #undef RS6000_BUILTIN_S
12925 #undef RS6000_BUILTIN_X
12927 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12928 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12929 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12930 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12931 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12932 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12933 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
12934 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12935 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12936 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12937 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
12938 { MASK, ICODE, NAME, ENUM },
12940 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12942 static const struct builtin_description bdesc_spe_predicates[] =
12944 #include "rs6000-builtin.def"
12947 /* SPE evsel predicates. */
12948 #undef RS6000_BUILTIN_0
12949 #undef RS6000_BUILTIN_1
12950 #undef RS6000_BUILTIN_2
12951 #undef RS6000_BUILTIN_3
12952 #undef RS6000_BUILTIN_A
12953 #undef RS6000_BUILTIN_D
12954 #undef RS6000_BUILTIN_E
12955 #undef RS6000_BUILTIN_H
12956 #undef RS6000_BUILTIN_P
12957 #undef RS6000_BUILTIN_Q
12958 #undef RS6000_BUILTIN_S
12959 #undef RS6000_BUILTIN_X
12961 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12962 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12963 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12964 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12965 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12966 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12967 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
12968 { MASK, ICODE, NAME, ENUM },
12970 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12971 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12972 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
12973 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
12974 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12976 static const struct builtin_description bdesc_spe_evsel[] =
12978 #include "rs6000-builtin.def"
12981 /* PAIRED predicates. */
12982 #undef RS6000_BUILTIN_0
12983 #undef RS6000_BUILTIN_1
12984 #undef RS6000_BUILTIN_2
12985 #undef RS6000_BUILTIN_3
12986 #undef RS6000_BUILTIN_A
12987 #undef RS6000_BUILTIN_D
12988 #undef RS6000_BUILTIN_E
12989 #undef RS6000_BUILTIN_H
12990 #undef RS6000_BUILTIN_P
12991 #undef RS6000_BUILTIN_Q
12992 #undef RS6000_BUILTIN_S
12993 #undef RS6000_BUILTIN_X
12995 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12996 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12997 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12998 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12999 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13000 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13001 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13002 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13003 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13004 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13005 { MASK, ICODE, NAME, ENUM },
13007 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13008 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13010 static const struct builtin_description bdesc_paired_preds[] =
13012 #include "rs6000-builtin.def"
13015 /* ABS* operations. */
13017 #undef RS6000_BUILTIN_0
13018 #undef RS6000_BUILTIN_1
13019 #undef RS6000_BUILTIN_2
13020 #undef RS6000_BUILTIN_3
13021 #undef RS6000_BUILTIN_A
13022 #undef RS6000_BUILTIN_D
13023 #undef RS6000_BUILTIN_E
13024 #undef RS6000_BUILTIN_H
13025 #undef RS6000_BUILTIN_P
13026 #undef RS6000_BUILTIN_Q
13027 #undef RS6000_BUILTIN_S
13028 #undef RS6000_BUILTIN_X
13030 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13031 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13032 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13033 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13034 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13035 { MASK, ICODE, NAME, ENUM },
13037 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13038 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13039 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13040 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13041 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13042 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13043 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13045 static const struct builtin_description bdesc_abs[] =
13047 #include "rs6000-builtin.def"
13050 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13051 foo (VECa). */
13053 #undef RS6000_BUILTIN_0
13054 #undef RS6000_BUILTIN_1
13055 #undef RS6000_BUILTIN_2
13056 #undef RS6000_BUILTIN_3
13057 #undef RS6000_BUILTIN_A
13058 #undef RS6000_BUILTIN_D
13059 #undef RS6000_BUILTIN_E
13060 #undef RS6000_BUILTIN_H
13061 #undef RS6000_BUILTIN_P
13062 #undef RS6000_BUILTIN_Q
13063 #undef RS6000_BUILTIN_S
13064 #undef RS6000_BUILTIN_X
13066 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13067 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13068 { MASK, ICODE, NAME, ENUM },
13070 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13071 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13072 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13073 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13074 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13075 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13076 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13077 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13078 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13079 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13081 static const struct builtin_description bdesc_1arg[] =
13083 #include "rs6000-builtin.def"
13086 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13088 #undef RS6000_BUILTIN_0
13089 #undef RS6000_BUILTIN_1
13090 #undef RS6000_BUILTIN_2
13091 #undef RS6000_BUILTIN_3
13092 #undef RS6000_BUILTIN_A
13093 #undef RS6000_BUILTIN_D
13094 #undef RS6000_BUILTIN_E
13095 #undef RS6000_BUILTIN_H
13096 #undef RS6000_BUILTIN_P
13097 #undef RS6000_BUILTIN_Q
13098 #undef RS6000_BUILTIN_S
13099 #undef RS6000_BUILTIN_X
13101 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13102 { MASK, ICODE, NAME, ENUM },
13104 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13108 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13109 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13110 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13111 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13112 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13113 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13114 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13116 static const struct builtin_description bdesc_0arg[] =
13118 #include "rs6000-builtin.def"
13121 /* HTM builtins. */
13122 #undef RS6000_BUILTIN_0
13123 #undef RS6000_BUILTIN_1
13124 #undef RS6000_BUILTIN_2
13125 #undef RS6000_BUILTIN_3
13126 #undef RS6000_BUILTIN_A
13127 #undef RS6000_BUILTIN_D
13128 #undef RS6000_BUILTIN_E
13129 #undef RS6000_BUILTIN_H
13130 #undef RS6000_BUILTIN_P
13131 #undef RS6000_BUILTIN_Q
13132 #undef RS6000_BUILTIN_S
13133 #undef RS6000_BUILTIN_X
13135 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13137 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13138 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13139 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13140 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13141 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13142 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13143 { MASK, ICODE, NAME, ENUM },
13145 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13146 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13147 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13148 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13150 static const struct builtin_description bdesc_htm[] =
13152 #include "rs6000-builtin.def"
13155 #undef RS6000_BUILTIN_0
13156 #undef RS6000_BUILTIN_1
13157 #undef RS6000_BUILTIN_2
13158 #undef RS6000_BUILTIN_3
13159 #undef RS6000_BUILTIN_A
13160 #undef RS6000_BUILTIN_D
13161 #undef RS6000_BUILTIN_E
13162 #undef RS6000_BUILTIN_H
13163 #undef RS6000_BUILTIN_P
13164 #undef RS6000_BUILTIN_Q
13165 #undef RS6000_BUILTIN_S
13167 /* Return true if a builtin function is overloaded. */
13168 bool
13169 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13171 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13174 /* Expand an expression EXP that calls a builtin without arguments. */
13175 static rtx
13176 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13178 rtx pat;
13179 machine_mode tmode = insn_data[icode].operand[0].mode;
13181 if (icode == CODE_FOR_nothing)
13182 /* Builtin not supported on this processor. */
13183 return 0;
13185 if (target == 0
13186 || GET_MODE (target) != tmode
13187 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13188 target = gen_reg_rtx (tmode);
13190 pat = GEN_FCN (icode) (target);
13191 if (! pat)
13192 return 0;
13193 emit_insn (pat);
13195 return target;
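
/* Illustrative sketch (not part of this file): a zero-operand builtin
   such as __builtin_darn_32 (the ISA 3.0 random-number instruction) takes
   no arguments, so the expander above only needs to pick a suitable
   target register.  Assumes compilation with -mcpu=power9.  */

int
random32 (void)
{
  return __builtin_darn_32 ();
}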
13199 static rtx
13200 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13202 rtx pat;
13203 tree arg0 = CALL_EXPR_ARG (exp, 0);
13204 tree arg1 = CALL_EXPR_ARG (exp, 1);
13205 rtx op0 = expand_normal (arg0);
13206 rtx op1 = expand_normal (arg1);
13207 machine_mode mode0 = insn_data[icode].operand[0].mode;
13208 machine_mode mode1 = insn_data[icode].operand[1].mode;
13210 if (icode == CODE_FOR_nothing)
13211 /* Builtin not supported on this processor. */
13212 return 0;
13214 /* If we got invalid arguments bail out before generating bad rtl. */
13215 if (arg0 == error_mark_node || arg1 == error_mark_node)
13216 return const0_rtx;
13218 if (GET_CODE (op0) != CONST_INT
13219 || INTVAL (op0) > 255
13220 || INTVAL (op0) < 0)
13222 error ("argument 1 must be an 8-bit field value");
13223 return const0_rtx;
13226 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13227 op0 = copy_to_mode_reg (mode0, op0);
13229 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13230 op1 = copy_to_mode_reg (mode1, op1);
13232 pat = GEN_FCN (icode) (op0, op1);
13233 if (! pat)
13234 return const0_rtx;
13235 emit_insn (pat);
13237 return NULL_RTX;
13240 static rtx
13241 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13243 rtx pat;
13244 tree arg0 = CALL_EXPR_ARG (exp, 0);
13245 rtx op0 = expand_normal (arg0);
13246 machine_mode tmode = insn_data[icode].operand[0].mode;
13247 machine_mode mode0 = insn_data[icode].operand[1].mode;
13249 if (icode == CODE_FOR_nothing)
13250 /* Builtin not supported on this processor. */
13251 return 0;
13253 /* If we got invalid arguments bail out before generating bad rtl. */
13254 if (arg0 == error_mark_node)
13255 return const0_rtx;
13257 if (icode == CODE_FOR_altivec_vspltisb
13258 || icode == CODE_FOR_altivec_vspltish
13259 || icode == CODE_FOR_altivec_vspltisw
13260 || icode == CODE_FOR_spe_evsplatfi
13261 || icode == CODE_FOR_spe_evsplati)
13263 /* Only allow 5-bit *signed* literals. */
13264 if (GET_CODE (op0) != CONST_INT
13265 || INTVAL (op0) > 15
13266 || INTVAL (op0) < -16)
13268 error ("argument 1 must be a 5-bit signed literal");
13269 return const0_rtx;
13273 if (target == 0
13274 || GET_MODE (target) != tmode
13275 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13276 target = gen_reg_rtx (tmode);
13278 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13279 op0 = copy_to_mode_reg (mode0, op0);
13281 pat = GEN_FCN (icode) (target, op0);
13282 if (! pat)
13283 return 0;
13284 emit_insn (pat);
13286 return target;
13289 static rtx
13290 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13292 rtx pat, scratch1, scratch2;
13293 tree arg0 = CALL_EXPR_ARG (exp, 0);
13294 rtx op0 = expand_normal (arg0);
13295 machine_mode tmode = insn_data[icode].operand[0].mode;
13296 machine_mode mode0 = insn_data[icode].operand[1].mode;
13298 /* If we have invalid arguments, bail out before generating bad rtl. */
13299 if (arg0 == error_mark_node)
13300 return const0_rtx;
13302 if (target == 0
13303 || GET_MODE (target) != tmode
13304 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13305 target = gen_reg_rtx (tmode);
13307 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13308 op0 = copy_to_mode_reg (mode0, op0);
13310 scratch1 = gen_reg_rtx (mode0);
13311 scratch2 = gen_reg_rtx (mode0);
13313 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13314 if (! pat)
13315 return 0;
13316 emit_insn (pat);
13318 return target;
13321 static rtx
13322 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13324 rtx pat;
13325 tree arg0 = CALL_EXPR_ARG (exp, 0);
13326 tree arg1 = CALL_EXPR_ARG (exp, 1);
13327 rtx op0 = expand_normal (arg0);
13328 rtx op1 = expand_normal (arg1);
13329 machine_mode tmode = insn_data[icode].operand[0].mode;
13330 machine_mode mode0 = insn_data[icode].operand[1].mode;
13331 machine_mode mode1 = insn_data[icode].operand[2].mode;
13333 if (icode == CODE_FOR_nothing)
13334 /* Builtin not supported on this processor. */
13335 return 0;
13337 /* If we got invalid arguments bail out before generating bad rtl. */
13338 if (arg0 == error_mark_node || arg1 == error_mark_node)
13339 return const0_rtx;
13341 if (icode == CODE_FOR_altivec_vcfux
13342 || icode == CODE_FOR_altivec_vcfsx
13343 || icode == CODE_FOR_altivec_vctsxs
13344 || icode == CODE_FOR_altivec_vctuxs
13345 || icode == CODE_FOR_altivec_vspltb
13346 || icode == CODE_FOR_altivec_vsplth
13347 || icode == CODE_FOR_altivec_vspltw
13348 || icode == CODE_FOR_spe_evaddiw
13349 || icode == CODE_FOR_spe_evldd
13350 || icode == CODE_FOR_spe_evldh
13351 || icode == CODE_FOR_spe_evldw
13352 || icode == CODE_FOR_spe_evlhhesplat
13353 || icode == CODE_FOR_spe_evlhhossplat
13354 || icode == CODE_FOR_spe_evlhhousplat
13355 || icode == CODE_FOR_spe_evlwhe
13356 || icode == CODE_FOR_spe_evlwhos
13357 || icode == CODE_FOR_spe_evlwhou
13358 || icode == CODE_FOR_spe_evlwhsplat
13359 || icode == CODE_FOR_spe_evlwwsplat
13360 || icode == CODE_FOR_spe_evrlwi
13361 || icode == CODE_FOR_spe_evslwi
13362 || icode == CODE_FOR_spe_evsrwis
13363 || icode == CODE_FOR_spe_evsubifw
13364 || icode == CODE_FOR_spe_evsrwiu)
13366 /* Only allow 5-bit unsigned literals. */
13367 STRIP_NOPS (arg1);
13368 if (TREE_CODE (arg1) != INTEGER_CST
13369 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13371 error ("argument 2 must be a 5-bit unsigned literal");
13372 return const0_rtx;
13376 if (target == 0
13377 || GET_MODE (target) != tmode
13378 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13379 target = gen_reg_rtx (tmode);
13381 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13382 op0 = copy_to_mode_reg (mode0, op0);
13383 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13384 op1 = copy_to_mode_reg (mode1, op1);
13386 pat = GEN_FCN (icode) (target, op0, op1);
13387 if (! pat)
13388 return 0;
13389 emit_insn (pat);
13391 return target;
13394 static rtx
13395 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13397 rtx pat, scratch;
13398 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13399 tree arg0 = CALL_EXPR_ARG (exp, 1);
13400 tree arg1 = CALL_EXPR_ARG (exp, 2);
13401 rtx op0 = expand_normal (arg0);
13402 rtx op1 = expand_normal (arg1);
13403 machine_mode tmode = SImode;
13404 machine_mode mode0 = insn_data[icode].operand[1].mode;
13405 machine_mode mode1 = insn_data[icode].operand[2].mode;
13406 int cr6_form_int;
13408 if (TREE_CODE (cr6_form) != INTEGER_CST)
13410 error ("argument 1 of __builtin_altivec_predicate must be a constant");
13411 return const0_rtx;
13413 else
13414 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13416 gcc_assert (mode0 == mode1);
13418 /* If we have invalid arguments, bail out before generating bad rtl. */
13419 if (arg0 == error_mark_node || arg1 == error_mark_node)
13420 return const0_rtx;
13422 if (target == 0
13423 || GET_MODE (target) != tmode
13424 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13425 target = gen_reg_rtx (tmode);
13427 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13428 op0 = copy_to_mode_reg (mode0, op0);
13429 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13430 op1 = copy_to_mode_reg (mode1, op1);
13432 scratch = gen_reg_rtx (mode0);
13434 pat = GEN_FCN (icode) (scratch, op0, op1);
13435 if (! pat)
13436 return 0;
13437 emit_insn (pat);
13439 /* The vec_any* and vec_all* predicates use the same opcodes for two
13440 different operations, but the bits in CR6 will be different
13441 depending on what information we want. So we have to play tricks
13442 with CR6 to get the right bits out.
13444 If you think this is disgusting, look at the specs for the
13445 AltiVec predicates. */
13447 switch (cr6_form_int)
13449 case 0:
13450 emit_insn (gen_cr6_test_for_zero (target));
13451 break;
13452 case 1:
13453 emit_insn (gen_cr6_test_for_zero_reverse (target));
13454 break;
13455 case 2:
13456 emit_insn (gen_cr6_test_for_lt (target));
13457 break;
13458 case 3:
13459 emit_insn (gen_cr6_test_for_lt_reverse (target));
13460 break;
13461 default:
13462 error ("argument 1 of __builtin_altivec_predicate is out of range");
13463 break;
13466 return target;
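
/* Illustrative sketch (not part of this file): how the CR6 trick above
   surfaces to users.  Both predicates compile to the same vcmpequw.
   instruction; they differ only in which CR6 bit this routine tests.
   Assumes a target compiled with -maltivec.  */

#include <altivec.h>

int
all_equal (vector signed int a, vector signed int b)
{
  return vec_all_eq (a, b);	/* tests the "all elements true" bit */
}

int
any_equal (vector signed int a, vector signed int b)
{
  return vec_any_eq (a, b);	/* tests the reverse of "all false" */
}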
13469 static rtx
13470 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
13472 rtx pat, addr;
13473 tree arg0 = CALL_EXPR_ARG (exp, 0);
13474 tree arg1 = CALL_EXPR_ARG (exp, 1);
13475 machine_mode tmode = insn_data[icode].operand[0].mode;
13476 machine_mode mode0 = Pmode;
13477 machine_mode mode1 = Pmode;
13478 rtx op0 = expand_normal (arg0);
13479 rtx op1 = expand_normal (arg1);
13481 if (icode == CODE_FOR_nothing)
13482 /* Builtin not supported on this processor. */
13483 return 0;
13485 /* If we got invalid arguments bail out before generating bad rtl. */
13486 if (arg0 == error_mark_node || arg1 == error_mark_node)
13487 return const0_rtx;
13489 if (target == 0
13490 || GET_MODE (target) != tmode
13491 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13492 target = gen_reg_rtx (tmode);
13494 op1 = copy_to_mode_reg (mode1, op1);
13496 if (op0 == const0_rtx)
13498 addr = gen_rtx_MEM (tmode, op1);
13500 else
13502 op0 = copy_to_mode_reg (mode0, op0);
13503 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
13506 pat = GEN_FCN (icode) (target, addr);
13508 if (! pat)
13509 return 0;
13510 emit_insn (pat);
13512 return target;
13515 /* Return a constant vector for use as a little-endian permute control vector
13516 to reverse the order of elements of the given vector mode. */
13517 static rtx
13518 swap_selector_for_mode (machine_mode mode)
13520 /* These are little endian vectors, so their elements are reversed
13521 from what you would normally expect for a permute control vector. */
13522 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13523 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13524 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13525 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
13526 unsigned int *swaparray, i;
13527 rtx perm[16];
13529 switch (mode)
13531 case V2DFmode:
13532 case V2DImode:
13533 swaparray = swap2;
13534 break;
13535 case V4SFmode:
13536 case V4SImode:
13537 swaparray = swap4;
13538 break;
13539 case V8HImode:
13540 swaparray = swap8;
13541 break;
13542 case V16QImode:
13543 swaparray = swap16;
13544 break;
13545 default:
13546 gcc_unreachable ();
13549 for (i = 0; i < 16; ++i)
13550 perm[i] = GEN_INT (swaparray[i]);
13552 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
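
/* Illustrative sketch (not part of this file): a plain-C model of the
   vperm operation the selector above feeds.  Result byte I is taken from
   the 32-byte concatenation of the two inputs at index SEL[I] & 0x1f,
   using big-endian byte numbering.  */

static void
vperm_model (const unsigned char a[16], const unsigned char b[16],
	     const unsigned char sel[16], unsigned char out[16])
{
  int i;
  for (i = 0; i < 16; i++)
    {
      unsigned char idx = sel[i] & 0x1f;
      out[i] = idx < 16 ? a[idx] : b[idx - 16];
    }
}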
13555 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian
13556 target with -maltivec=be specified. Issue the load followed by an
13557 element-reversing permute. */
13558 void
13559 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13561 rtx tmp = gen_reg_rtx (mode);
13562 rtx load = gen_rtx_SET (tmp, op1);
13563 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13564 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
13565 rtx sel = swap_selector_for_mode (mode);
13566 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
13568 gcc_assert (REG_P (op0));
13569 emit_insn (par);
13570 emit_insn (gen_rtx_SET (op0, vperm));
13573 /* Generate code for a "stvxl" built-in for a little endian target with
13574 -maltivec=be specified. Issue the store preceded by an element-reversing
13575 permute. */
13576 void
13577 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13579 rtx tmp = gen_reg_rtx (mode);
13580 rtx store = gen_rtx_SET (op0, tmp);
13581 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
13582 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
13583 rtx sel = swap_selector_for_mode (mode);
13584 rtx vperm;
13586 gcc_assert (REG_P (op1));
13587 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13588 emit_insn (gen_rtx_SET (tmp, vperm));
13589 emit_insn (par);
13592 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
13593 specified. Issue the store preceded by an element-reversing permute. */
13594 void
13595 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
13597 machine_mode inner_mode = GET_MODE_INNER (mode);
13598 rtx tmp = gen_reg_rtx (mode);
13599 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
13600 rtx sel = swap_selector_for_mode (mode);
13601 rtx vperm;
13603 gcc_assert (REG_P (op1));
13604 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
13605 emit_insn (gen_rtx_SET (tmp, vperm));
13606 emit_insn (gen_rtx_SET (op0, stvx));
13609 static rtx
13610 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13612 rtx pat, addr;
13613 tree arg0 = CALL_EXPR_ARG (exp, 0);
13614 tree arg1 = CALL_EXPR_ARG (exp, 1);
13615 machine_mode tmode = insn_data[icode].operand[0].mode;
13616 machine_mode mode0 = Pmode;
13617 machine_mode mode1 = Pmode;
13618 rtx op0 = expand_normal (arg0);
13619 rtx op1 = expand_normal (arg1);
13621 if (icode == CODE_FOR_nothing)
13622 /* Builtin not supported on this processor. */
13623 return 0;
13625 /* If we got invalid arguments bail out before generating bad rtl. */
13626 if (arg0 == error_mark_node || arg1 == error_mark_node)
13627 return const0_rtx;
13629 if (target == 0
13630 || GET_MODE (target) != tmode
13631 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13632 target = gen_reg_rtx (tmode);
13634 op1 = copy_to_mode_reg (mode1, op1);
13636 /* For LVX, express the RTL accurately by ANDing the address with -16.
13637 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13638 so the raw address is fine. */
13639 if (icode == CODE_FOR_altivec_lvx_v2df_2op
13640 || icode == CODE_FOR_altivec_lvx_v2di_2op
13641 || icode == CODE_FOR_altivec_lvx_v4sf_2op
13642 || icode == CODE_FOR_altivec_lvx_v4si_2op
13643 || icode == CODE_FOR_altivec_lvx_v8hi_2op
13644 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
13646 rtx rawaddr;
13647 if (op0 == const0_rtx)
13648 rawaddr = op1;
13649 else
13651 op0 = copy_to_mode_reg (mode0, op0);
13652 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13654 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13655 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13657 /* For -maltivec=be, emit the load and follow it up with a
13658 permute to swap the elements. */
13659 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
13661 rtx temp = gen_reg_rtx (tmode);
13662 emit_insn (gen_rtx_SET (temp, addr));
13664 rtx sel = swap_selector_for_mode (tmode);
13665 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
13666 UNSPEC_VPERM);
13667 emit_insn (gen_rtx_SET (target, vperm));
13669 else
13670 emit_insn (gen_rtx_SET (target, addr));
13672 else
13674 if (op0 == const0_rtx)
13675 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13676 else
13678 op0 = copy_to_mode_reg (mode0, op0);
13679 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13680 gen_rtx_PLUS (Pmode, op1, op0));
13683 pat = GEN_FCN (icode) (target, addr);
13684 if (! pat)
13685 return 0;
13686 emit_insn (pat);
13689 return target;
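
/* Illustrative sketch (not part of this file): the AND with -16 above
   models lvx's hardware behavior of ignoring the low four address bits,
   i.e. the load always comes from a 16-byte-aligned address.  */

#include <stdint.h>

static const void *
lvx_effective_address (const void *p)
{
  return (const void *) ((uintptr_t) p & ~(uintptr_t) 15);
}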
13692 static rtx
13693 spe_expand_stv_builtin (enum insn_code icode, tree exp)
13695 tree arg0 = CALL_EXPR_ARG (exp, 0);
13696 tree arg1 = CALL_EXPR_ARG (exp, 1);
13697 tree arg2 = CALL_EXPR_ARG (exp, 2);
13698 rtx op0 = expand_normal (arg0);
13699 rtx op1 = expand_normal (arg1);
13700 rtx op2 = expand_normal (arg2);
13701 rtx pat;
13702 machine_mode mode0 = insn_data[icode].operand[0].mode;
13703 machine_mode mode1 = insn_data[icode].operand[1].mode;
13704 machine_mode mode2 = insn_data[icode].operand[2].mode;
13706 /* Invalid arguments. Bail before doing anything stoopid! */
13707 if (arg0 == error_mark_node
13708 || arg1 == error_mark_node
13709 || arg2 == error_mark_node)
13710 return const0_rtx;
13712 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
13713 op0 = copy_to_mode_reg (mode2, op0);
13714 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
13715 op1 = copy_to_mode_reg (mode0, op1);
13716 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13717 op2 = copy_to_mode_reg (mode1, op2);
13719 pat = GEN_FCN (icode) (op1, op2, op0);
13720 if (pat)
13721 emit_insn (pat);
13722 return NULL_RTX;
13725 static rtx
13726 paired_expand_stv_builtin (enum insn_code icode, tree exp)
13728 tree arg0 = CALL_EXPR_ARG (exp, 0);
13729 tree arg1 = CALL_EXPR_ARG (exp, 1);
13730 tree arg2 = CALL_EXPR_ARG (exp, 2);
13731 rtx op0 = expand_normal (arg0);
13732 rtx op1 = expand_normal (arg1);
13733 rtx op2 = expand_normal (arg2);
13734 rtx pat, addr;
13735 machine_mode tmode = insn_data[icode].operand[0].mode;
13736 machine_mode mode1 = Pmode;
13737 machine_mode mode2 = Pmode;
13739 /* Invalid arguments. Bail before doing anything stoopid! */
13740 if (arg0 == error_mark_node
13741 || arg1 == error_mark_node
13742 || arg2 == error_mark_node)
13743 return const0_rtx;
13745 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
13746 op0 = copy_to_mode_reg (tmode, op0);
13748 op2 = copy_to_mode_reg (mode2, op2);
13750 if (op1 == const0_rtx)
13752 addr = gen_rtx_MEM (tmode, op2);
13754 else
13756 op1 = copy_to_mode_reg (mode1, op1);
13757 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
13760 pat = GEN_FCN (icode) (addr, op0);
13761 if (pat)
13762 emit_insn (pat);
13763 return NULL_RTX;
13766 static rtx
13767 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13769 tree arg0 = CALL_EXPR_ARG (exp, 0);
13770 tree arg1 = CALL_EXPR_ARG (exp, 1);
13771 tree arg2 = CALL_EXPR_ARG (exp, 2);
13772 rtx op0 = expand_normal (arg0);
13773 rtx op1 = expand_normal (arg1);
13774 rtx op2 = expand_normal (arg2);
13775 rtx pat, addr, rawaddr;
13776 machine_mode tmode = insn_data[icode].operand[0].mode;
13777 machine_mode smode = insn_data[icode].operand[1].mode;
13778 machine_mode mode1 = Pmode;
13779 machine_mode mode2 = Pmode;
13781 /* Invalid arguments. Bail before doing anything stoopid! */
13782 if (arg0 == error_mark_node
13783 || arg1 == error_mark_node
13784 || arg2 == error_mark_node)
13785 return const0_rtx;
13787 op2 = copy_to_mode_reg (mode2, op2);
13789 /* For STVX, express the RTL accurately by ANDing the address with -16.
13790 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13791 so the raw address is fine. */
13792 if (icode == CODE_FOR_altivec_stvx_v2df_2op
13793 || icode == CODE_FOR_altivec_stvx_v2di_2op
13794 || icode == CODE_FOR_altivec_stvx_v4sf_2op
13795 || icode == CODE_FOR_altivec_stvx_v4si_2op
13796 || icode == CODE_FOR_altivec_stvx_v8hi_2op
13797 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
13799 if (op1 == const0_rtx)
13800 rawaddr = op2;
13801 else
13803 op1 = copy_to_mode_reg (mode1, op1);
13804 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13807 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13808 addr = gen_rtx_MEM (tmode, addr);
13810 op0 = copy_to_mode_reg (tmode, op0);
13812 /* For -maltivec=be, emit a permute to swap the elements, followed
13813 by the store. */
13814 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
13816 rtx temp = gen_reg_rtx (tmode);
13817 rtx sel = swap_selector_for_mode (tmode);
13818 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
13819 UNSPEC_VPERM);
13820 emit_insn (gen_rtx_SET (temp, vperm));
13821 emit_insn (gen_rtx_SET (addr, temp));
13823 else
13824 emit_insn (gen_rtx_SET (addr, op0));
13826 else
13828 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13829 op0 = copy_to_mode_reg (smode, op0);
13831 if (op1 == const0_rtx)
13832 addr = gen_rtx_MEM (tmode, op2);
13833 else
13835 op1 = copy_to_mode_reg (mode1, op1);
13836 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13839 pat = GEN_FCN (icode) (addr, op0);
13840 if (pat)
13841 emit_insn (pat);
13844 return NULL_RTX;
13847 /* Return the appropriate SPR number associated with the given builtin. */
13848 static inline HOST_WIDE_INT
13849 htm_spr_num (enum rs6000_builtins code)
13851 if (code == HTM_BUILTIN_GET_TFHAR
13852 || code == HTM_BUILTIN_SET_TFHAR)
13853 return TFHAR_SPR;
13854 else if (code == HTM_BUILTIN_GET_TFIAR
13855 || code == HTM_BUILTIN_SET_TFIAR)
13856 return TFIAR_SPR;
13857 else if (code == HTM_BUILTIN_GET_TEXASR
13858 || code == HTM_BUILTIN_SET_TEXASR)
13859 return TEXASR_SPR;
13860 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13861 || code == HTM_BUILTIN_SET_TEXASRU);
13862 return TEXASRU_SPR;
13865 /* Return the appropriate SPR regno associated with the given builtin. */
13866 static inline HOST_WIDE_INT
13867 htm_spr_regno (enum rs6000_builtins code)
13869 if (code == HTM_BUILTIN_GET_TFHAR
13870 || code == HTM_BUILTIN_SET_TFHAR)
13871 return TFHAR_REGNO;
13872 else if (code == HTM_BUILTIN_GET_TFIAR
13873 || code == HTM_BUILTIN_SET_TFIAR)
13874 return TFIAR_REGNO;
13875 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
13876 || code == HTM_BUILTIN_SET_TEXASR
13877 || code == HTM_BUILTIN_GET_TEXASRU
13878 || code == HTM_BUILTIN_SET_TEXASRU);
13879 return TEXASR_REGNO;
13882 /* Return the correct ICODE value depending on whether we are
13883 setting or reading the HTM SPRs. */
13884 static inline enum insn_code
13885 rs6000_htm_spr_icode (bool nonvoid)
13887 if (nonvoid)
13888 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13889 else
13890 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13893 /* Expand the HTM builtin in EXP and store the result in TARGET.
13894 Store true in *EXPANDEDP if we found a builtin to expand. */
13895 static rtx
13896 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13898 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13899 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13900 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13901 const struct builtin_description *d;
13902 size_t i;
13904 *expandedp = true;
13906 if (!TARGET_POWERPC64
13907 && (fcode == HTM_BUILTIN_TABORTDC
13908 || fcode == HTM_BUILTIN_TABORTDCI))
13910 size_t uns_fcode = (size_t)fcode;
13911 const char *name = rs6000_builtin_info[uns_fcode].name;
13912 error ("builtin %s is only valid in 64-bit mode", name);
13913 return const0_rtx;
13916 /* Expand the HTM builtins. */
13917 d = bdesc_htm;
13918 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13919 if (d->code == fcode)
13921 rtx op[MAX_HTM_OPERANDS], pat;
13922 int nopnds = 0;
13923 tree arg;
13924 call_expr_arg_iterator iter;
13925 unsigned attr = rs6000_builtin_info[fcode].attr;
13926 enum insn_code icode = d->icode;
13927 const struct insn_operand_data *insn_op;
13928 bool uses_spr = (attr & RS6000_BTC_SPR);
13929 rtx cr = NULL_RTX;
13931 if (uses_spr)
13932 icode = rs6000_htm_spr_icode (nonvoid);
13933 insn_op = &insn_data[icode].operand[0];
13935 if (nonvoid)
13937 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
13938 if (!target
13939 || GET_MODE (target) != tmode
13940 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13941 target = gen_reg_rtx (tmode);
13942 if (uses_spr)
13943 op[nopnds++] = target;
13946 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13948 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13949 return const0_rtx;
13951 insn_op = &insn_data[icode].operand[nopnds];
13953 op[nopnds] = expand_normal (arg);
13955 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13957 if (!strcmp (insn_op->constraint, "n"))
13959 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13960 if (!CONST_INT_P (op[nopnds]))
13961 error ("argument %d must be an unsigned literal", arg_num);
13962 else
13963 error ("argument %d is an unsigned literal that is "
13964 "out of range", arg_num);
13965 return const0_rtx;
13967 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13970 nopnds++;
13973 /* Handle the builtins for extended mnemonics. These accept
13974 no arguments, but map to builtins that take arguments. */
13975 switch (fcode)
13977 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13978 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13979 op[nopnds++] = GEN_INT (1);
13980 if (flag_checking)
13981 attr |= RS6000_BTC_UNARY;
13982 break;
13983 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13984 op[nopnds++] = GEN_INT (0);
13985 if (flag_checking)
13986 attr |= RS6000_BTC_UNARY;
13987 break;
13988 default:
13989 break;
13992 /* If this builtin accesses SPRs, then pass in the appropriate
13993 SPR number and SPR regno as the last two operands. */
13994 if (uses_spr)
13996 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13997 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13998 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14000 /* If this builtin accesses a CR, then pass in a scratch
14001 CR as the last operand. */
14002 else if (attr & RS6000_BTC_CR)
14003 cr = gen_reg_rtx (CCmode);
14004 op[nopnds++] = cr;
14007 if (flag_checking)
14009 int expected_nopnds = 0;
14010 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14011 expected_nopnds = 1;
14012 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14013 expected_nopnds = 2;
14014 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14015 expected_nopnds = 3;
14016 if (!(attr & RS6000_BTC_VOID))
14017 expected_nopnds += 1;
14018 if (uses_spr)
14019 expected_nopnds += 2;
14021 gcc_assert (nopnds == expected_nopnds
14022 && nopnds <= MAX_HTM_OPERANDS);
14025 switch (nopnds)
14027 case 1:
14028 pat = GEN_FCN (icode) (op[0]);
14029 break;
14030 case 2:
14031 pat = GEN_FCN (icode) (op[0], op[1]);
14032 break;
14033 case 3:
14034 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14035 break;
14036 case 4:
14037 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14038 break;
14039 default:
14040 gcc_unreachable ();
14042 if (!pat)
14043 return NULL_RTX;
14044 emit_insn (pat);
14046 if (attr & RS6000_BTC_CR)
14048 if (fcode == HTM_BUILTIN_TBEGIN)
14050 /* Emit code to set TARGET to true or false depending on
14051 whether the tbegin. instruction succeeded or failed
14052 to start a transaction. We do this by placing the one's
14053 complement of CR's EQ bit into TARGET. */
14054 rtx scratch = gen_reg_rtx (SImode);
14055 emit_insn (gen_rtx_SET (scratch,
14056 gen_rtx_EQ (SImode, cr,
14057 const0_rtx)));
14058 emit_insn (gen_rtx_SET (target,
14059 gen_rtx_XOR (SImode, scratch,
14060 GEN_INT (1))));
14062 else
14064 /* Emit code to copy the 4-bit condition register field
14065 CR into the least significant end of register TARGET. */
14066 rtx scratch1 = gen_reg_rtx (SImode);
14067 rtx scratch2 = gen_reg_rtx (SImode);
14068 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14069 emit_insn (gen_movcc (subreg, cr));
14070 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14071 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14075 if (nonvoid)
14076 return target;
14077 return const0_rtx;
14080 *expandedp = false;
14081 return NULL_RTX;
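
/* Illustrative sketch (not part of this file): how the tbegin. CR
   handling above looks to users.  __builtin_tbegin returns nonzero when
   the transaction starts -- the complemented EQ bit computed above.
   Assumes a target compiled with -mhtm.  */

#include <htmintrin.h>

int
increment_transactionally (int *p)
{
  if (__builtin_tbegin (0))
    {
      *p += 1;
      __builtin_tend (0);
      return 1;
    }
  return 0;			/* failed to start, or aborted */
}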
14084 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14086 static rtx
14087 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14088 rtx target)
14090 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14091 if (fcode == RS6000_BUILTIN_CPU_INIT)
14092 return const0_rtx;
14094 if (target == 0 || GET_MODE (target) != SImode)
14095 target = gen_reg_rtx (SImode);
14097 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14098 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14099 if (TREE_CODE (arg) != STRING_CST)
14101 error ("builtin %s only accepts a string argument",
14102 rs6000_builtin_info[(size_t) fcode].name);
14103 return const0_rtx;
14106 if (fcode == RS6000_BUILTIN_CPU_IS)
14108 const char *cpu = TREE_STRING_POINTER (arg);
14109 rtx cpuid = NULL_RTX;
14110 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14111 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14113 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14114 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14115 break;
14117 if (cpuid == NULL_RTX)
14119 /* Invalid CPU argument. */
14120 error ("cpu %s is an invalid argument to builtin %s",
14121 cpu, rs6000_builtin_info[(size_t) fcode].name);
14122 return const0_rtx;
14125 rtx platform = gen_reg_rtx (SImode);
14126 rtx tcbmem = gen_const_mem (SImode,
14127 gen_rtx_PLUS (Pmode,
14128 gen_rtx_REG (Pmode, TLS_REGNUM),
14129 GEN_INT (TCB_PLATFORM_OFFSET)));
14130 emit_move_insn (platform, tcbmem);
14131 emit_insn (gen_eqsi3 (target, platform, cpuid));
14133 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14135 const char *hwcap = TREE_STRING_POINTER (arg);
14136 rtx mask = NULL_RTX;
14137 int hwcap_offset;
14138 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14139 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14141 mask = GEN_INT (cpu_supports_info[i].mask);
14142 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14143 break;
14145 if (mask == NULL_RTX)
14147 /* Invalid HWCAP argument. */
14148 error ("hwcap %s is an invalid argument to builtin %s",
14149 hwcap, rs6000_builtin_info[(size_t) fcode].name);
14150 return const0_rtx;
14153 rtx tcb_hwcap = gen_reg_rtx (SImode);
14154 rtx tcbmem = gen_const_mem (SImode,
14155 gen_rtx_PLUS (Pmode,
14156 gen_rtx_REG (Pmode, TLS_REGNUM),
14157 GEN_INT (hwcap_offset)));
14158 emit_move_insn (tcb_hwcap, tcbmem);
14159 rtx scratch1 = gen_reg_rtx (SImode);
14160 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14161 rtx scratch2 = gen_reg_rtx (SImode);
14162 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14163 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14166 /* Record that we have expanded a CPU builtin, so that we can later
14167 emit a reference to the special symbol exported by LIBC to ensure we
14168 do not link against an old LIBC that doesn't support this feature. */
14169 cpu_builtin_p = true;
14171 #else
14172 /* For old LIBCs, always return FALSE. */
14173 emit_move_insn (target, GEN_INT (0));
14174 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14176 return target;
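
/* Illustrative sketch (not part of this file): user-level view of the
   builtins expanded above, assuming a glibc new enough to fill in the
   TCB fields (otherwise __builtin_cpu_supports simply yields 0).  */

#include <stdio.h>

int
main (void)
{
  __builtin_cpu_init ();	/* a nop on this target, see above */
  if (__builtin_cpu_is ("power9"))
    puts ("running on a POWER9");
  if (__builtin_cpu_supports ("vsx"))
    puts ("VSX is available");
  return 0;
}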
14179 static rtx
14180 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14182 rtx pat;
14183 tree arg0 = CALL_EXPR_ARG (exp, 0);
14184 tree arg1 = CALL_EXPR_ARG (exp, 1);
14185 tree arg2 = CALL_EXPR_ARG (exp, 2);
14186 rtx op0 = expand_normal (arg0);
14187 rtx op1 = expand_normal (arg1);
14188 rtx op2 = expand_normal (arg2);
14189 machine_mode tmode = insn_data[icode].operand[0].mode;
14190 machine_mode mode0 = insn_data[icode].operand[1].mode;
14191 machine_mode mode1 = insn_data[icode].operand[2].mode;
14192 machine_mode mode2 = insn_data[icode].operand[3].mode;
14194 if (icode == CODE_FOR_nothing)
14195 /* Builtin not supported on this processor. */
14196 return 0;
14198 /* If we got invalid arguments bail out before generating bad rtl. */
14199 if (arg0 == error_mark_node
14200 || arg1 == error_mark_node
14201 || arg2 == error_mark_node)
14202 return const0_rtx;
14204 /* Check and prepare argument depending on the instruction code.
14206 Note that a switch statement instead of the sequence of tests
14207 would be incorrect as many of the CODE_FOR values could be
14208 CODE_FOR_nothing and that would yield multiple alternatives
14209 with identical values. We'd never reach here at runtime in
14210 this case. */
14211 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14212 || icode == CODE_FOR_altivec_vsldoi_v4si
14213 || icode == CODE_FOR_altivec_vsldoi_v8hi
14214 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14216 /* Only allow 4-bit unsigned literals. */
14217 STRIP_NOPS (arg2);
14218 if (TREE_CODE (arg2) != INTEGER_CST
14219 || TREE_INT_CST_LOW (arg2) & ~0xf)
14221 error ("argument 3 must be a 4-bit unsigned literal");
14222 return const0_rtx;
14225 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14226 || icode == CODE_FOR_vsx_xxpermdi_v2di
14227 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14228 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14229 || icode == CODE_FOR_vsx_xxsldwi_v4si
14230 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14231 || icode == CODE_FOR_vsx_xxsldwi_v2di
14232 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14234 /* Only allow 2-bit unsigned literals. */
14235 STRIP_NOPS (arg2);
14236 if (TREE_CODE (arg2) != INTEGER_CST
14237 || TREE_INT_CST_LOW (arg2) & ~0x3)
14239 error ("argument 3 must be a 2-bit unsigned literal");
14240 return const0_rtx;
14243 else if (icode == CODE_FOR_vsx_set_v2df
14244 || icode == CODE_FOR_vsx_set_v2di
14245 || icode == CODE_FOR_bcdadd
14246 || icode == CODE_FOR_bcdadd_lt
14247 || icode == CODE_FOR_bcdadd_eq
14248 || icode == CODE_FOR_bcdadd_gt
14249 || icode == CODE_FOR_bcdsub
14250 || icode == CODE_FOR_bcdsub_lt
14251 || icode == CODE_FOR_bcdsub_eq
14252 || icode == CODE_FOR_bcdsub_gt)
14254 /* Only allow 1-bit unsigned literals. */
14255 STRIP_NOPS (arg2);
14256 if (TREE_CODE (arg2) != INTEGER_CST
14257 || TREE_INT_CST_LOW (arg2) & ~0x1)
14259 error ("argument 3 must be a 1-bit unsigned literal");
14260 return const0_rtx;
14263 else if (icode == CODE_FOR_dfp_ddedpd_dd
14264 || icode == CODE_FOR_dfp_ddedpd_td)
14266 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14267 STRIP_NOPS (arg0);
14268 if (TREE_CODE (arg0) != INTEGER_CST
14269 || TREE_INT_CST_LOW (arg0) & ~0x3)
14271 error ("argument 1 must be 0 or 2");
14272 return const0_rtx;
14275 else if (icode == CODE_FOR_dfp_denbcd_dd
14276 || icode == CODE_FOR_dfp_denbcd_td)
14278 /* Only allow 1-bit unsigned literals. */
14279 STRIP_NOPS (arg0);
14280 if (TREE_CODE (arg0) != INTEGER_CST
14281 || TREE_INT_CST_LOW (arg0) & ~0x1)
14283 error ("argument 1 must be a 1-bit unsigned literal");
14284 return const0_rtx;
14287 else if (icode == CODE_FOR_dfp_dscli_dd
14288 || icode == CODE_FOR_dfp_dscli_td
14289 || icode == CODE_FOR_dfp_dscri_dd
14290 || icode == CODE_FOR_dfp_dscri_td)
14292 /* Only allow 6-bit unsigned literals. */
14293 STRIP_NOPS (arg1);
14294 if (TREE_CODE (arg1) != INTEGER_CST
14295 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14297 error ("argument 2 must be a 6-bit unsigned literal");
14298 return const0_rtx;
14301 else if (icode == CODE_FOR_crypto_vshasigmaw
14302 || icode == CODE_FOR_crypto_vshasigmad)
14304 /* Check that the 2nd and 3rd arguments are integer constants in range,
14305 and prepare the arguments. */
14306 STRIP_NOPS (arg1);
14307 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
14309 error ("argument 2 must be 0 or 1");
14310 return const0_rtx;
14313 STRIP_NOPS (arg2);
14314 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
14316 error ("argument 3 must be in the range 0..15");
14317 return const0_rtx;
14321 if (target == 0
14322 || GET_MODE (target) != tmode
14323 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14324 target = gen_reg_rtx (tmode);
14326 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14327 op0 = copy_to_mode_reg (mode0, op0);
14328 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14329 op1 = copy_to_mode_reg (mode1, op1);
14330 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14331 op2 = copy_to_mode_reg (mode2, op2);
14333 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
14334 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
14335 else
14336 pat = GEN_FCN (icode) (target, op0, op1, op2);
14337 if (! pat)
14338 return 0;
14339 emit_insn (pat);
14341 return target;
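
/* Illustrative sketch (not part of this file): the literal-range checks
   above all use one mask idiom -- a value fits in an unsigned N-bit
   literal iff clearing its low N bits leaves nothing set.  */

static int
fits_unsigned_literal (unsigned long long v, unsigned int bits)
{
  return (v & ~((1ULL << bits) - 1)) == 0;
}

/* For example, fits_unsigned_literal (x, 4) mirrors !(x & ~0xf), the
   4-bit test used for vsldoi above.  */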
14344 /* Expand the lvx builtins. */
14345 static rtx
14346 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
14348 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14349 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14350 tree arg0;
14351 machine_mode tmode, mode0;
14352 rtx pat, op0;
14353 enum insn_code icode;
14355 switch (fcode)
14357 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
14358 icode = CODE_FOR_vector_altivec_load_v16qi;
14359 break;
14360 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
14361 icode = CODE_FOR_vector_altivec_load_v8hi;
14362 break;
14363 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
14364 icode = CODE_FOR_vector_altivec_load_v4si;
14365 break;
14366 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
14367 icode = CODE_FOR_vector_altivec_load_v4sf;
14368 break;
14369 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
14370 icode = CODE_FOR_vector_altivec_load_v2df;
14371 break;
14372 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
14373 icode = CODE_FOR_vector_altivec_load_v2di;
break;
14374 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
14375 icode = CODE_FOR_vector_altivec_load_v1ti;
14376 break;
14377 default:
14378 *expandedp = false;
14379 return NULL_RTX;
14382 *expandedp = true;
14384 arg0 = CALL_EXPR_ARG (exp, 0);
14385 op0 = expand_normal (arg0);
14386 tmode = insn_data[icode].operand[0].mode;
14387 mode0 = insn_data[icode].operand[1].mode;
14389 if (target == 0
14390 || GET_MODE (target) != tmode
14391 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14392 target = gen_reg_rtx (tmode);
14394 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14395 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14397 pat = GEN_FCN (icode) (target, op0);
14398 if (! pat)
14399 return 0;
14400 emit_insn (pat);
14401 return target;
14404 /* Expand the stvx builtins. */
14405 static rtx
14406 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14407 bool *expandedp)
14409 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14410 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14411 tree arg0, arg1;
14412 machine_mode mode0, mode1;
14413 rtx pat, op0, op1;
14414 enum insn_code icode;
14416 switch (fcode)
14418 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
14419 icode = CODE_FOR_vector_altivec_store_v16qi;
14420 break;
14421 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
14422 icode = CODE_FOR_vector_altivec_store_v8hi;
14423 break;
14424 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
14425 icode = CODE_FOR_vector_altivec_store_v4si;
14426 break;
14427 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
14428 icode = CODE_FOR_vector_altivec_store_v4sf;
14429 break;
14430 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
14431 icode = CODE_FOR_vector_altivec_store_v2df;
14432 break;
14433 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
14434 icode = CODE_FOR_vector_altivec_store_v2di;
break;
14435 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
14436 icode = CODE_FOR_vector_altivec_store_v1ti;
14437 break;
14438 default:
14439 *expandedp = false;
14440 return NULL_RTX;
14443 arg0 = CALL_EXPR_ARG (exp, 0);
14444 arg1 = CALL_EXPR_ARG (exp, 1);
14445 op0 = expand_normal (arg0);
14446 op1 = expand_normal (arg1);
14447 mode0 = insn_data[icode].operand[0].mode;
14448 mode1 = insn_data[icode].operand[1].mode;
14450 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14451 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14452 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14453 op1 = copy_to_mode_reg (mode1, op1);
14455 pat = GEN_FCN (icode) (op0, op1);
14456 if (pat)
14457 emit_insn (pat);
14459 *expandedp = true;
14460 return NULL_RTX;
14463 /* Expand the dst builtins. */
14464 static rtx
14465 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14466 bool *expandedp)
14468 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14469 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14470 tree arg0, arg1, arg2;
14471 machine_mode mode0, mode1;
14472 rtx pat, op0, op1, op2;
14473 const struct builtin_description *d;
14474 size_t i;
14476 *expandedp = false;
14478 /* Handle DST variants. */
14479 d = bdesc_dst;
14480 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14481 if (d->code == fcode)
14483 arg0 = CALL_EXPR_ARG (exp, 0);
14484 arg1 = CALL_EXPR_ARG (exp, 1);
14485 arg2 = CALL_EXPR_ARG (exp, 2);
14486 op0 = expand_normal (arg0);
14487 op1 = expand_normal (arg1);
14488 op2 = expand_normal (arg2);
14489 mode0 = insn_data[d->icode].operand[0].mode;
14490 mode1 = insn_data[d->icode].operand[1].mode;
14492 /* Invalid arguments, bail out before generating bad rtl. */
14493 if (arg0 == error_mark_node
14494 || arg1 == error_mark_node
14495 || arg2 == error_mark_node)
14496 return const0_rtx;
14498 *expandedp = true;
14499 STRIP_NOPS (arg2);
14500 if (TREE_CODE (arg2) != INTEGER_CST
14501 || TREE_INT_CST_LOW (arg2) & ~0x3)
14503 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14504 return const0_rtx;
14507 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14508 op0 = copy_to_mode_reg (Pmode, op0);
14509 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14510 op1 = copy_to_mode_reg (mode1, op1);
14512 pat = GEN_FCN (d->icode) (op0, op1, op2);
14513 if (pat != 0)
14514 emit_insn (pat);
14516 return NULL_RTX;
14519 return NULL_RTX;
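/* Illustrative note: the dst expander above insists that the last
   argument be a compile-time constant in the range 0..3, since the
   hardware encodes the data-stream tag in two bits.  For example,
   using the <altivec.h> interface:

       vec_dst (p, ctl, 0);    accepted: stream tag 0
       vec_dst (p, ctl, tag);  rejected unless TAG folds to 0..3  */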
14522 /* Expand vec_init builtin. */
14523 static rtx
14524 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14526 machine_mode tmode = TYPE_MODE (type);
14527 machine_mode inner_mode = GET_MODE_INNER (tmode);
14528 int i, n_elt = GET_MODE_NUNITS (tmode);
14530 gcc_assert (VECTOR_MODE_P (tmode));
14531 gcc_assert (n_elt == call_expr_nargs (exp));
14533 if (!target || !register_operand (target, tmode))
14534 target = gen_reg_rtx (tmode);
14536 /* If we have a vector comprised of a single element, such as V1TImode, do
14537 the initialization directly. */
14538 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14540 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14541 emit_move_insn (target, gen_lowpart (tmode, x));
14543 else
14545 rtvec v = rtvec_alloc (n_elt);
14547 for (i = 0; i < n_elt; ++i)
14549 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14550 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14553 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14556 return target;
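/* Illustrative sketch, assuming the __builtin_vec_init_* spellings:

       vector int v = __builtin_vec_init_v4si (a, b, c, d);

   Each scalar argument is expanded, narrowed to the element mode, and
   collected into a PARALLEL for rs6000_expand_vector_init; the
   single-element V1TImode case degenerates into a mode-punned move.  */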
14559 /* Return the integer constant in ARG. Constrain it to be in the range
14560 of the subparts of VEC_TYPE; issue an error if not. */
14562 static int
14563 get_element_number (tree vec_type, tree arg)
14565 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14567 if (!tree_fits_uhwi_p (arg)
14568 || (elt = tree_to_uhwi (arg), elt > max))
14570 error ("selector must be an integer constant in the range 0..%wi", max);
14571 return 0;
14574 return elt;
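/* Illustrative note: for a V4SI vector type TYPE_VECTOR_SUBPARTS is 4,
   so get_element_number accepts selectors 0 through 3 and diagnoses
   anything else ("selector must be an integer constant in the range
   0..3"), returning element 0 so that expansion can continue after
   the error.  */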
14577 /* Expand vec_set builtin. */
14578 static rtx
14579 altivec_expand_vec_set_builtin (tree exp)
14581 machine_mode tmode, mode1;
14582 tree arg0, arg1, arg2;
14583 int elt;
14584 rtx op0, op1;
14586 arg0 = CALL_EXPR_ARG (exp, 0);
14587 arg1 = CALL_EXPR_ARG (exp, 1);
14588 arg2 = CALL_EXPR_ARG (exp, 2);
14590 tmode = TYPE_MODE (TREE_TYPE (arg0));
14591 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14592 gcc_assert (VECTOR_MODE_P (tmode));
14594 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14595 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14596 elt = get_element_number (TREE_TYPE (arg0), arg2);
14598 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14599 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14601 op0 = force_reg (tmode, op0);
14602 op1 = force_reg (mode1, op1);
14604 rs6000_expand_vector_set (op0, op1, elt);
14606 return op0;
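/* Illustrative sketch, assuming the __builtin_vec_set_* spellings:

       v = __builtin_vec_set_v4si (v, x, 3);

   stores X into lane 3 of V.  The scalar is converted to the element
   mode and both operands are forced into registers before
   rs6000_expand_vector_set rewrites the selected lane.  */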
14609 /* Expand vec_ext builtin. */
14610 static rtx
14611 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14613 machine_mode tmode, mode0;
14614 tree arg0, arg1;
14615 int elt;
14616 rtx op0;
14618 arg0 = CALL_EXPR_ARG (exp, 0);
14619 arg1 = CALL_EXPR_ARG (exp, 1);
14621 op0 = expand_normal (arg0);
14622 elt = get_element_number (TREE_TYPE (arg0), arg1);
14624 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14625 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14626 gcc_assert (VECTOR_MODE_P (mode0));
14628 op0 = force_reg (mode0, op0);
14630 if (optimize || !target || !register_operand (target, tmode))
14631 target = gen_reg_rtx (tmode);
14633 rs6000_expand_vector_extract (target, op0, elt);
14635 return target;
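/* Illustrative sketch, assuming the __builtin_vec_ext_* spellings:

       int x = __builtin_vec_ext_v4si (v, 3);

   extracts lane 3 of V.  Note that when optimizing, a fresh pseudo is
   always used as the target so that rs6000_expand_vector_extract is
   free to choose its own lowering.  */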
14638 /* Expand the builtin in EXP and store the result in TARGET. Store
14639 true in *EXPANDEDP if we found a builtin to expand. */
14640 static rtx
14641 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14643 const struct builtin_description *d;
14644 size_t i;
14645 enum insn_code icode;
14646 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14647 tree arg0;
14648 rtx op0, pat;
14649 machine_mode tmode, mode0;
14650 enum rs6000_builtins fcode
14651 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14653 if (rs6000_overloaded_builtin_p (fcode))
14655 *expandedp = true;
14656 error ("unresolved overload for Altivec builtin %qF", fndecl);
14658 /* Given it is invalid, just generate a normal call. */
14659 return expand_call (exp, target, false);
14662 target = altivec_expand_ld_builtin (exp, target, expandedp);
14663 if (*expandedp)
14664 return target;
14666 target = altivec_expand_st_builtin (exp, target, expandedp);
14667 if (*expandedp)
14668 return target;
14670 target = altivec_expand_dst_builtin (exp, target, expandedp);
14671 if (*expandedp)
14672 return target;
14674 *expandedp = true;
14676 switch (fcode)
14678 case ALTIVEC_BUILTIN_STVX_V2DF:
14679 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
14680 case ALTIVEC_BUILTIN_STVX_V2DI:
14681 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
14682 case ALTIVEC_BUILTIN_STVX_V4SF:
14683 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
14684 case ALTIVEC_BUILTIN_STVX:
14685 case ALTIVEC_BUILTIN_STVX_V4SI:
14686 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
14687 case ALTIVEC_BUILTIN_STVX_V8HI:
14688 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
14689 case ALTIVEC_BUILTIN_STVX_V16QI:
14690 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
14691 case ALTIVEC_BUILTIN_STVEBX:
14692 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14693 case ALTIVEC_BUILTIN_STVEHX:
14694 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14695 case ALTIVEC_BUILTIN_STVEWX:
14696 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14697 case ALTIVEC_BUILTIN_STVXL_V2DF:
14698 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14699 case ALTIVEC_BUILTIN_STVXL_V2DI:
14700 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14701 case ALTIVEC_BUILTIN_STVXL_V4SF:
14702 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14703 case ALTIVEC_BUILTIN_STVXL:
14704 case ALTIVEC_BUILTIN_STVXL_V4SI:
14705 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14706 case ALTIVEC_BUILTIN_STVXL_V8HI:
14707 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14708 case ALTIVEC_BUILTIN_STVXL_V16QI:
14709 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14711 case ALTIVEC_BUILTIN_STVLX:
14712 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14713 case ALTIVEC_BUILTIN_STVLXL:
14714 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14715 case ALTIVEC_BUILTIN_STVRX:
14716 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14717 case ALTIVEC_BUILTIN_STVRXL:
14718 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14720 case VSX_BUILTIN_STXVD2X_V1TI:
14721 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14722 case VSX_BUILTIN_STXVD2X_V2DF:
14723 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14724 case VSX_BUILTIN_STXVD2X_V2DI:
14725 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14726 case VSX_BUILTIN_STXVW4X_V4SF:
14727 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14728 case VSX_BUILTIN_STXVW4X_V4SI:
14729 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14730 case VSX_BUILTIN_STXVW4X_V8HI:
14731 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14732 case VSX_BUILTIN_STXVW4X_V16QI:
14733 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14735 /* For the following on big endian, it's ok to use any appropriate
14736 unaligned-supporting store, so use a generic expander. For
14737 little-endian, the exact element-reversing instruction must
14738 be used. */
14739 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14741 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14742 : CODE_FOR_vsx_st_elemrev_v2df);
14743 return altivec_expand_stv_builtin (code, exp);
14745 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14747 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14748 : CODE_FOR_vsx_st_elemrev_v2di);
14749 return altivec_expand_stv_builtin (code, exp);
14751 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14753 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14754 : CODE_FOR_vsx_st_elemrev_v4sf);
14755 return altivec_expand_stv_builtin (code, exp);
14757 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14759 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14760 : CODE_FOR_vsx_st_elemrev_v4si);
14761 return altivec_expand_stv_builtin (code, exp);
14763 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14765 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14766 : CODE_FOR_vsx_st_elemrev_v8hi);
14767 return altivec_expand_stv_builtin (code, exp);
14769 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14771 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14772 : CODE_FOR_vsx_st_elemrev_v16qi);
14773 return altivec_expand_stv_builtin (code, exp);
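/* Illustrative note: on little endian the underlying VSX store
   instructions number elements in big-endian order, so the
   vsx_st_elemrev_* patterns chosen above reverse the elements to give
   these builtins their array-order semantics; on big endian a plain
   vsx_store_* already stores elements in array order, so any suitable
   unaligned-capable store may be matched.  */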
14776 case ALTIVEC_BUILTIN_MFVSCR:
14777 icode = CODE_FOR_altivec_mfvscr;
14778 tmode = insn_data[icode].operand[0].mode;
14780 if (target == 0
14781 || GET_MODE (target) != tmode
14782 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14783 target = gen_reg_rtx (tmode);
14785 pat = GEN_FCN (icode) (target);
14786 if (! pat)
14787 return 0;
14788 emit_insn (pat);
14789 return target;
14791 case ALTIVEC_BUILTIN_MTVSCR:
14792 icode = CODE_FOR_altivec_mtvscr;
14793 arg0 = CALL_EXPR_ARG (exp, 0);
14794 op0 = expand_normal (arg0);
14795 mode0 = insn_data[icode].operand[0].mode;
14797 /* If we got invalid arguments, bail out before generating bad rtl. */
14798 if (arg0 == error_mark_node)
14799 return const0_rtx;
14801 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14802 op0 = copy_to_mode_reg (mode0, op0);
14804 pat = GEN_FCN (icode) (op0);
14805 if (pat)
14806 emit_insn (pat);
14807 return NULL_RTX;
14809 case ALTIVEC_BUILTIN_DSSALL:
14810 emit_insn (gen_altivec_dssall ());
14811 return NULL_RTX;
14813 case ALTIVEC_BUILTIN_DSS:
14814 icode = CODE_FOR_altivec_dss;
14815 arg0 = CALL_EXPR_ARG (exp, 0);
14816 STRIP_NOPS (arg0);
14817 op0 = expand_normal (arg0);
14818 mode0 = insn_data[icode].operand[0].mode;
14820 /* If we got invalid arguments, bail out before generating bad rtl. */
14821 if (arg0 == error_mark_node)
14822 return const0_rtx;
14824 if (TREE_CODE (arg0) != INTEGER_CST
14825 || TREE_INT_CST_LOW (arg0) & ~0x3)
14827 error ("argument to dss must be a 2-bit unsigned literal");
14828 return const0_rtx;
14831 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14832 op0 = copy_to_mode_reg (mode0, op0);
14834 emit_insn (gen_altivec_dss (op0));
14835 return NULL_RTX;
14837 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14838 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14839 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14840 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14841 case VSX_BUILTIN_VEC_INIT_V2DF:
14842 case VSX_BUILTIN_VEC_INIT_V2DI:
14843 case VSX_BUILTIN_VEC_INIT_V1TI:
14844 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14846 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14847 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14848 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14849 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14850 case VSX_BUILTIN_VEC_SET_V2DF:
14851 case VSX_BUILTIN_VEC_SET_V2DI:
14852 case VSX_BUILTIN_VEC_SET_V1TI:
14853 return altivec_expand_vec_set_builtin (exp);
14855 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14856 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14857 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14858 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14859 case VSX_BUILTIN_VEC_EXT_V2DF:
14860 case VSX_BUILTIN_VEC_EXT_V2DI:
14861 case VSX_BUILTIN_VEC_EXT_V1TI:
14862 return altivec_expand_vec_ext_builtin (exp, target);
14864 default:
14865 break;
14866 /* Fall through to the table-driven expanders below. */
14869 /* Expand abs* operations. */
14870 d = bdesc_abs;
14871 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14872 if (d->code == fcode)
14873 return altivec_expand_abs_builtin (d->icode, exp, target);
14875 /* Expand the AltiVec predicates. */
14876 d = bdesc_altivec_preds;
14877 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14878 if (d->code == fcode)
14879 return altivec_expand_predicate_builtin (d->icode, exp, target);
14881 /* LV* are funky; they were initialized differently, so expand them by hand here. */
14882 switch (fcode)
14884 case ALTIVEC_BUILTIN_LVSL:
14885 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14886 exp, target, false);
14887 case ALTIVEC_BUILTIN_LVSR:
14888 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14889 exp, target, false);
14890 case ALTIVEC_BUILTIN_LVEBX:
14891 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14892 exp, target, false);
14893 case ALTIVEC_BUILTIN_LVEHX:
14894 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14895 exp, target, false);
14896 case ALTIVEC_BUILTIN_LVEWX:
14897 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14898 exp, target, false);
14899 case ALTIVEC_BUILTIN_LVXL_V2DF:
14900 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14901 exp, target, false);
14902 case ALTIVEC_BUILTIN_LVXL_V2DI:
14903 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14904 exp, target, false);
14905 case ALTIVEC_BUILTIN_LVXL_V4SF:
14906 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14907 exp, target, false);
14908 case ALTIVEC_BUILTIN_LVXL:
14909 case ALTIVEC_BUILTIN_LVXL_V4SI:
14910 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14911 exp, target, false);
14912 case ALTIVEC_BUILTIN_LVXL_V8HI:
14913 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14914 exp, target, false);
14915 case ALTIVEC_BUILTIN_LVXL_V16QI:
14916 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14917 exp, target, false);
14918 case ALTIVEC_BUILTIN_LVX_V2DF:
14919 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
14920 exp, target, false);
14921 case ALTIVEC_BUILTIN_LVX_V2DI:
14922 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
14923 exp, target, false);
14924 case ALTIVEC_BUILTIN_LVX_V4SF:
14925 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
14926 exp, target, false);
14927 case ALTIVEC_BUILTIN_LVX:
14928 case ALTIVEC_BUILTIN_LVX_V4SI:
14929 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
14930 exp, target, false);
14931 case ALTIVEC_BUILTIN_LVX_V8HI:
14932 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
14933 exp, target, false);
14934 case ALTIVEC_BUILTIN_LVX_V16QI:
14935 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
14936 exp, target, false);
14937 case ALTIVEC_BUILTIN_LVLX:
14938 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14939 exp, target, true);
14940 case ALTIVEC_BUILTIN_LVLXL:
14941 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14942 exp, target, true);
14943 case ALTIVEC_BUILTIN_LVRX:
14944 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14945 exp, target, true);
14946 case ALTIVEC_BUILTIN_LVRXL:
14947 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14948 exp, target, true);
14949 case VSX_BUILTIN_LXVD2X_V1TI:
14950 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14951 exp, target, false);
14952 case VSX_BUILTIN_LXVD2X_V2DF:
14953 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14954 exp, target, false);
14955 case VSX_BUILTIN_LXVD2X_V2DI:
14956 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14957 exp, target, false);
14958 case VSX_BUILTIN_LXVW4X_V4SF:
14959 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14960 exp, target, false);
14961 case VSX_BUILTIN_LXVW4X_V4SI:
14962 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14963 exp, target, false);
14964 case VSX_BUILTIN_LXVW4X_V8HI:
14965 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14966 exp, target, false);
14967 case VSX_BUILTIN_LXVW4X_V16QI:
14968 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14969 exp, target, false);
14970 /* For the following on big endian, it's ok to use any appropriate
14971 unaligned-supporting load, so use a generic expander. For
14972 little-endian, the exact element-reversing instruction must
14973 be used. */
14974 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14976 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14977 : CODE_FOR_vsx_ld_elemrev_v2df);
14978 return altivec_expand_lv_builtin (code, exp, target, false);
14980 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14982 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14983 : CODE_FOR_vsx_ld_elemrev_v2di);
14984 return altivec_expand_lv_builtin (code, exp, target, false);
14986 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14988 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14989 : CODE_FOR_vsx_ld_elemrev_v4sf);
14990 return altivec_expand_lv_builtin (code, exp, target, false);
14992 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14994 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14995 : CODE_FOR_vsx_ld_elemrev_v4si);
14996 return altivec_expand_lv_builtin (code, exp, target, false);
14998 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15000 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15001 : CODE_FOR_vsx_ld_elemrev_v8hi);
15002 return altivec_expand_lv_builtin (code, exp, target, false);
15004 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15006 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15007 : CODE_FOR_vsx_ld_elemrev_v16qi);
15008 return altivec_expand_lv_builtin (code, exp, target, false);
15010 break;
15011 default:
15012 break;
15013 /* Fall through; no builtin matched, so report failure below. */
15016 *expandedp = false;
15017 return NULL_RTX;
15020 /* Expand the builtin in EXP and store the result in TARGET. Store
15021 true in *EXPANDEDP if we found a builtin to expand. */
15022 static rtx
15023 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15025 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15026 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15027 const struct builtin_description *d;
15028 size_t i;
15030 *expandedp = true;
15032 switch (fcode)
15034 case PAIRED_BUILTIN_STX:
15035 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15036 case PAIRED_BUILTIN_LX:
15037 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15038 default:
15039 break;
15040 /* Fall through to the predicate expanders below. */
15043 /* Expand the paired predicates. */
15044 d = bdesc_paired_preds;
15045 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15046 if (d->code == fcode)
15047 return paired_expand_predicate_builtin (d->icode, exp, target);
15049 *expandedp = false;
15050 return NULL_RTX;
15053 /* Binops that need to be initialized manually, but can be expanded
15054 automagically by rs6000_expand_binop_builtin. */
15055 static const struct builtin_description bdesc_2arg_spe[] =
15057 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
15058 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
15059 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
15060 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
15061 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
15062 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
15063 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
15064 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
15065 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
15066 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
15067 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
15068 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
15069 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
15070 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
15071 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
15072 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
15073 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
15074 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
15075 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
15076 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
15077 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
15078 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
15081 /* Expand the builtin in EXP and store the result in TARGET. Store
15082 true in *EXPANDEDP if we found a builtin to expand.
15084 This expands the SPE builtins that are not simple unary and binary
15085 operations. */
15086 static rtx
15087 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
15089 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15090 tree arg1, arg0;
15091 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15092 enum insn_code icode;
15093 machine_mode tmode, mode0;
15094 rtx pat, op0;
15095 const struct builtin_description *d;
15096 size_t i;
15098 *expandedp = true;
15100 /* Syntax check for a 5-bit unsigned immediate. */
15101 switch (fcode)
15103 case SPE_BUILTIN_EVSTDD:
15104 case SPE_BUILTIN_EVSTDH:
15105 case SPE_BUILTIN_EVSTDW:
15106 case SPE_BUILTIN_EVSTWHE:
15107 case SPE_BUILTIN_EVSTWHO:
15108 case SPE_BUILTIN_EVSTWWE:
15109 case SPE_BUILTIN_EVSTWWO:
15110 arg1 = CALL_EXPR_ARG (exp, 2);
15111 if (TREE_CODE (arg1) != INTEGER_CST
15112 || TREE_INT_CST_LOW (arg1) & ~0x1f)
15114 error ("argument 2 must be a 5-bit unsigned literal");
15115 return const0_rtx;
15117 break;
15118 default:
15119 break;
15122 /* The evsplat*i instructions are not quite generic. */
15123 switch (fcode)
15125 case SPE_BUILTIN_EVSPLATFI:
15126 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
15127 exp, target);
15128 case SPE_BUILTIN_EVSPLATI:
15129 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
15130 exp, target);
15131 default:
15132 break;
15135 d = bdesc_2arg_spe;
15136 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
15137 if (d->code == fcode)
15138 return rs6000_expand_binop_builtin (d->icode, exp, target);
15140 d = bdesc_spe_predicates;
15141 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
15142 if (d->code == fcode)
15143 return spe_expand_predicate_builtin (d->icode, exp, target);
15145 d = bdesc_spe_evsel;
15146 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
15147 if (d->code == fcode)
15148 return spe_expand_evsel_builtin (d->icode, exp, target);
15150 switch (fcode)
15152 case SPE_BUILTIN_EVSTDDX:
15153 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
15154 case SPE_BUILTIN_EVSTDHX:
15155 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
15156 case SPE_BUILTIN_EVSTDWX:
15157 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
15158 case SPE_BUILTIN_EVSTWHEX:
15159 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
15160 case SPE_BUILTIN_EVSTWHOX:
15161 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
15162 case SPE_BUILTIN_EVSTWWEX:
15163 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
15164 case SPE_BUILTIN_EVSTWWOX:
15165 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
15166 case SPE_BUILTIN_EVSTDD:
15167 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
15168 case SPE_BUILTIN_EVSTDH:
15169 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
15170 case SPE_BUILTIN_EVSTDW:
15171 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
15172 case SPE_BUILTIN_EVSTWHE:
15173 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
15174 case SPE_BUILTIN_EVSTWHO:
15175 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
15176 case SPE_BUILTIN_EVSTWWE:
15177 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
15178 case SPE_BUILTIN_EVSTWWO:
15179 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
15180 case SPE_BUILTIN_MFSPEFSCR:
15181 icode = CODE_FOR_spe_mfspefscr;
15182 tmode = insn_data[icode].operand[0].mode;
15184 if (target == 0
15185 || GET_MODE (target) != tmode
15186 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15187 target = gen_reg_rtx (tmode);
15189 pat = GEN_FCN (icode) (target);
15190 if (! pat)
15191 return 0;
15192 emit_insn (pat);
15193 return target;
15194 case SPE_BUILTIN_MTSPEFSCR:
15195 icode = CODE_FOR_spe_mtspefscr;
15196 arg0 = CALL_EXPR_ARG (exp, 0);
15197 op0 = expand_normal (arg0);
15198 mode0 = insn_data[icode].operand[0].mode;
15200 if (arg0 == error_mark_node)
15201 return const0_rtx;
15203 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15204 op0 = copy_to_mode_reg (mode0, op0);
15206 pat = GEN_FCN (icode) (op0);
15207 if (pat)
15208 emit_insn (pat);
15209 return NULL_RTX;
15210 default:
15211 break;
15214 *expandedp = false;
15215 return NULL_RTX;
15218 static rtx
15219 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15221 rtx pat, scratch, tmp;
15222 tree form = CALL_EXPR_ARG (exp, 0);
15223 tree arg0 = CALL_EXPR_ARG (exp, 1);
15224 tree arg1 = CALL_EXPR_ARG (exp, 2);
15225 rtx op0 = expand_normal (arg0);
15226 rtx op1 = expand_normal (arg1);
15227 machine_mode mode0 = insn_data[icode].operand[1].mode;
15228 machine_mode mode1 = insn_data[icode].operand[2].mode;
15229 int form_int;
15230 enum rtx_code code;
15232 if (TREE_CODE (form) != INTEGER_CST)
15234 error ("argument 1 of __builtin_paired_predicate must be a constant");
15235 return const0_rtx;
15237 else
15238 form_int = TREE_INT_CST_LOW (form);
15240 gcc_assert (mode0 == mode1);
15242 if (arg0 == error_mark_node || arg1 == error_mark_node)
15243 return const0_rtx;
15245 if (target == 0
15246 || GET_MODE (target) != SImode
15247 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15248 target = gen_reg_rtx (SImode);
15249 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15250 op0 = copy_to_mode_reg (mode0, op0);
15251 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15252 op1 = copy_to_mode_reg (mode1, op1);
15254 scratch = gen_reg_rtx (CCFPmode);
15256 pat = GEN_FCN (icode) (scratch, op0, op1);
15257 if (!pat)
15258 return const0_rtx;
15260 emit_insn (pat);
15262 switch (form_int)
15264 /* LT bit. */
15265 case 0:
15266 code = LT;
15267 break;
15268 /* GT bit. */
15269 case 1:
15270 code = GT;
15271 break;
15272 /* EQ bit. */
15273 case 2:
15274 code = EQ;
15275 break;
15276 /* UN bit. */
15277 case 3:
15278 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15279 return target;
15280 default:
15281 error ("argument 1 of __builtin_paired_predicate is out of range");
15282 return const0_rtx;
15285 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15286 emit_move_insn (target, tmp);
15287 return target;
15290 static rtx
15291 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15293 rtx pat, scratch, tmp;
15294 tree form = CALL_EXPR_ARG (exp, 0);
15295 tree arg0 = CALL_EXPR_ARG (exp, 1);
15296 tree arg1 = CALL_EXPR_ARG (exp, 2);
15297 rtx op0 = expand_normal (arg0);
15298 rtx op1 = expand_normal (arg1);
15299 machine_mode mode0 = insn_data[icode].operand[1].mode;
15300 machine_mode mode1 = insn_data[icode].operand[2].mode;
15301 int form_int;
15302 enum rtx_code code;
15304 if (TREE_CODE (form) != INTEGER_CST)
15306 error ("argument 1 of __builtin_spe_predicate must be a constant");
15307 return const0_rtx;
15309 else
15310 form_int = TREE_INT_CST_LOW (form);
15312 gcc_assert (mode0 == mode1);
15314 if (arg0 == error_mark_node || arg1 == error_mark_node)
15315 return const0_rtx;
15317 if (target == 0
15318 || GET_MODE (target) != SImode
15319 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
15320 target = gen_reg_rtx (SImode);
15322 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15323 op0 = copy_to_mode_reg (mode0, op0);
15324 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15325 op1 = copy_to_mode_reg (mode1, op1);
15327 scratch = gen_reg_rtx (CCmode);
15329 pat = GEN_FCN (icode) (scratch, op0, op1);
15330 if (! pat)
15331 return const0_rtx;
15332 emit_insn (pat);
15334 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
15335 _lower_. We use one compare, but look in different bits of the
15336 CR for each variant.
15338 There are 2 elements in each SPE simd type (upper/lower). The CR
15339 bits are set as follows:
15341 BIT 0 | BIT 1 | BIT 2 | BIT 3
15342 U | L | (U | L) | (U & L)
15344 So, for an "all" relationship, BIT 3 would be set.
15345 For an "any" relationship, BIT 2 would be set. Etc.
15347 Following traditional nomenclature, these bits map to:
15349 BIT 0 | BIT 1 | BIT 2 | BIT 3
15350 LT | GT | EQ | OV
15352 Later, we will generate rtl to look in the OV/EQ/LT/GT bits, for forms 0 through 3 respectively.
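   As a concrete illustration: if the compare is "greater than" and
   the two lanes give U=1 and L=0, then BIT 2 (U | L) is set while
   BIT 3 (U & L) is not, so the "any" form (form 1, EQ bit) is true
   and the "all" form (form 0, OV bit) is false.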
15355 switch (form_int)
15357 /* All variant. OV bit. */
15358 case 0:
15359 /* We need to get to the OV bit, which is the ORDERED bit. We
15360 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
15361 that's ugly and will make validate_condition_mode die.
15362 So let's just use another pattern. */
15363 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15364 return target;
15365 /* Any variant. EQ bit. */
15366 case 1:
15367 code = EQ;
15368 break;
15369 /* Upper variant. LT bit. */
15370 case 2:
15371 code = LT;
15372 break;
15373 /* Lower variant. GT bit. */
15374 case 3:
15375 code = GT;
15376 break;
15377 default:
15378 error ("argument 1 of __builtin_spe_predicate is out of range");
15379 return const0_rtx;
15382 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15383 emit_move_insn (target, tmp);
15385 return target;
15388 /* The evsel builtins look like this:
15390 e = __builtin_spe_evsel_OP (a, b, c, d);
15392 and work like this:
15394 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
15395 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
15398 static rtx
15399 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
15401 rtx pat, scratch;
15402 tree arg0 = CALL_EXPR_ARG (exp, 0);
15403 tree arg1 = CALL_EXPR_ARG (exp, 1);
15404 tree arg2 = CALL_EXPR_ARG (exp, 2);
15405 tree arg3 = CALL_EXPR_ARG (exp, 3);
15406 rtx op0 = expand_normal (arg0);
15407 rtx op1 = expand_normal (arg1);
15408 rtx op2 = expand_normal (arg2);
15409 rtx op3 = expand_normal (arg3);
15410 machine_mode mode0 = insn_data[icode].operand[1].mode;
15411 machine_mode mode1 = insn_data[icode].operand[2].mode;
15413 gcc_assert (mode0 == mode1);
15415 if (arg0 == error_mark_node || arg1 == error_mark_node
15416 || arg2 == error_mark_node || arg3 == error_mark_node)
15417 return const0_rtx;
15419 if (target == 0
15420 || GET_MODE (target) != mode0
15421 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
15422 target = gen_reg_rtx (mode0);
15424 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15425 op0 = copy_to_mode_reg (mode0, op0);
15426 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15427 op1 = copy_to_mode_reg (mode0, op1);
15428 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
15429 op2 = copy_to_mode_reg (mode0, op2);
15430 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
15431 op3 = copy_to_mode_reg (mode0, op3);
15433 /* Generate the compare. */
15434 scratch = gen_reg_rtx (CCmode);
15435 pat = GEN_FCN (icode) (scratch, op0, op1);
15436 if (! pat)
15437 return const0_rtx;
15438 emit_insn (pat);
15440 if (mode0 == V2SImode)
15441 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
15442 else
15443 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
15445 return target;
15448 /* Raise an error message for a builtin function that is called without the
15449 appropriate target options being set. */
15451 static void
15452 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15454 size_t uns_fncode = (size_t)fncode;
15455 const char *name = rs6000_builtin_info[uns_fncode].name;
15456 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15458 gcc_assert (name != NULL);
15459 if ((fnmask & RS6000_BTM_CELL) != 0)
15460 error ("Builtin function %s is only valid for the Cell processor", name);
15461 else if ((fnmask & RS6000_BTM_VSX) != 0)
15462 error ("Builtin function %s requires the -mvsx option", name);
15463 else if ((fnmask & RS6000_BTM_HTM) != 0)
15464 error ("Builtin function %s requires the -mhtm option", name);
15465 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15466 error ("Builtin function %s requires the -maltivec option", name);
15467 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
15468 error ("Builtin function %s requires the -mpaired option", name);
15469 else if ((fnmask & RS6000_BTM_SPE) != 0)
15470 error ("Builtin function %s requires the -mspe option", name);
15471 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15472 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15473 error ("Builtin function %s requires the -mhard-dfp and"
15474 " -mpower8-vector options", name);
15475 else if ((fnmask & RS6000_BTM_DFP) != 0)
15476 error ("Builtin function %s requires the -mhard-dfp option", name);
15477 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15478 error ("Builtin function %s requires the -mpower8-vector option", name);
15479 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15480 error ("Builtin function %s requires the -mpower9-vector option", name);
15481 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15482 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
15483 error ("Builtin function %s requires the -mhard-float and"
15484 " -mlong-double-128 options", name);
15485 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15486 error ("Builtin function %s requires the -mhard-float option", name);
15487 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15488 error ("Builtin function %s requires the -mfloat128 option", name);
15489 else
15490 error ("Builtin function %s is not supported with the current options",
15491 name);
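/* Illustrative note (the builtin name below is chosen purely as an
   example): a call to a VSX-only builtin compiled without -mvsx
   reaches the RS6000_BTM_VSX arm above and reports something like

       Builtin function __builtin_vsx_xvcvdpsp requires the -mvsx option  */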
15494 /* Target hook for early folding of built-ins, shamelessly stolen
15495 from ia64.c. */
15497 static tree
15498 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
15499 tree *args, bool ignore ATTRIBUTE_UNUSED)
15501 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
15503 enum rs6000_builtins fn_code
15504 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15505 switch (fn_code)
15507 case RS6000_BUILTIN_NANQ:
15508 case RS6000_BUILTIN_NANSQ:
15510 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15511 const char *str = c_getstr (*args);
15512 int quiet = fn_code == RS6000_BUILTIN_NANQ;
15513 REAL_VALUE_TYPE real;
15515 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
15516 return build_real (type, real);
15517 return NULL_TREE;
15519 case RS6000_BUILTIN_INFQ:
15520 case RS6000_BUILTIN_HUGE_VALQ:
15522 tree type = TREE_TYPE (TREE_TYPE (fndecl));
15523 REAL_VALUE_TYPE inf;
15524 real_inf (&inf);
15525 return build_real (type, inf);
15527 default:
15528 break;
15531 #ifdef SUBTARGET_FOLD_BUILTIN
15532 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15533 #else
15534 return NULL_TREE;
15535 #endif
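/* Illustrative note: the folder above lets calls such as

       __float128 n = __builtin_nanq ("");
       __float128 i = __builtin_infq ();

   collapse to REAL_CST constants at compile time, so no code is
   emitted for them; NANQ/NANSQ fold only when the string argument is
   a constant that real_nan can parse.  */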
15538 /* Expand an expression EXP that calls a built-in function,
15539 with result going to TARGET if that's convenient
15540 (and in mode MODE if that's convenient).
15541 SUBTARGET may be used as the target for computing one of EXP's operands.
15542 IGNORE is nonzero if the value is to be ignored. */
15544 static rtx
15545 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15546 machine_mode mode ATTRIBUTE_UNUSED,
15547 int ignore ATTRIBUTE_UNUSED)
15549 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15550 enum rs6000_builtins fcode
15551 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15552 size_t uns_fcode = (size_t)fcode;
15553 const struct builtin_description *d;
15554 size_t i;
15555 rtx ret;
15556 bool success;
15557 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15558 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15560 if (TARGET_DEBUG_BUILTIN)
15562 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15563 const char *name1 = rs6000_builtin_info[uns_fcode].name;
15564 const char *name2 = ((icode != CODE_FOR_nothing)
15565 ? get_insn_name ((int)icode)
15566 : "nothing");
15567 const char *name3;
15569 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
15571 default: name3 = "unknown"; break;
15572 case RS6000_BTC_SPECIAL: name3 = "special"; break;
15573 case RS6000_BTC_UNARY: name3 = "unary"; break;
15574 case RS6000_BTC_BINARY: name3 = "binary"; break;
15575 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
15576 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
15577 case RS6000_BTC_ABS: name3 = "abs"; break;
15578 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
15579 case RS6000_BTC_DST: name3 = "dst"; break;
15583 fprintf (stderr,
15584 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
15585 (name1) ? name1 : "---", fcode,
15586 (name2) ? name2 : "---", (int)icode,
15587 name3,
15588 func_valid_p ? "" : ", not valid");
15591 if (!func_valid_p)
15593 rs6000_invalid_builtin (fcode);
15595 /* Given it is invalid, just generate a normal call. */
15596 return expand_call (exp, target, ignore);
15599 switch (fcode)
15601 case RS6000_BUILTIN_RECIP:
15602 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
15604 case RS6000_BUILTIN_RECIPF:
15605 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
15607 case RS6000_BUILTIN_RSQRTF:
15608 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
15610 case RS6000_BUILTIN_RSQRT:
15611 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
15613 case POWER7_BUILTIN_BPERMD:
15614 return rs6000_expand_binop_builtin (((TARGET_64BIT)
15615 ? CODE_FOR_bpermd_di
15616 : CODE_FOR_bpermd_si), exp, target);
15618 case RS6000_BUILTIN_GET_TB:
15619 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
15620 target);
15622 case RS6000_BUILTIN_MFTB:
15623 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
15624 ? CODE_FOR_rs6000_mftb_di
15625 : CODE_FOR_rs6000_mftb_si),
15626 target);
15628 case RS6000_BUILTIN_MFFS:
15629 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
15631 case RS6000_BUILTIN_MTFSF:
15632 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
15634 case RS6000_BUILTIN_CPU_INIT:
15635 case RS6000_BUILTIN_CPU_IS:
15636 case RS6000_BUILTIN_CPU_SUPPORTS:
15637 return cpu_expand_builtin (fcode, exp, target);
15639 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
15640 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
15642 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
15643 : (int) CODE_FOR_altivec_lvsl_direct);
15644 machine_mode tmode = insn_data[icode].operand[0].mode;
15645 machine_mode mode = insn_data[icode].operand[1].mode;
15646 tree arg;
15647 rtx op, addr, pat;
15649 gcc_assert (TARGET_ALTIVEC);
15651 arg = CALL_EXPR_ARG (exp, 0);
15652 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
15653 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
15654 addr = memory_address (mode, op);
15655 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
15656 op = addr;
15657 else
15659 /* For the load case we need to negate the address. */
15660 op = gen_reg_rtx (GET_MODE (addr));
15661 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
15663 op = gen_rtx_MEM (mode, op);
15665 if (target == 0
15666 || GET_MODE (target) != tmode
15667 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15668 target = gen_reg_rtx (tmode);
15670 pat = GEN_FCN (icode) (target, op);
15671 if (!pat)
15672 return 0;
15673 emit_insn (pat);
15675 return target;
15678 case ALTIVEC_BUILTIN_VCFUX:
15679 case ALTIVEC_BUILTIN_VCFSX:
15680 case ALTIVEC_BUILTIN_VCTUXS:
15681 case ALTIVEC_BUILTIN_VCTSXS:
15682 /* FIXME: There's got to be a nicer way to handle this case than
15683 constructing a new CALL_EXPR. */
15684 if (call_expr_nargs (exp) == 1)
15686 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
15687 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
15689 break;
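/* Illustrative note: the code above rewrites a one-argument call such
   as __builtin_altivec_vcfsx (v) into the two-argument form with an
   implicit scale of zero, so that the regular binary expansion tables
   below can handle it.  */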
15691 default:
15692 break;
15695 if (TARGET_ALTIVEC)
15697 ret = altivec_expand_builtin (exp, target, &success);
15699 if (success)
15700 return ret;
15702 if (TARGET_SPE)
15704 ret = spe_expand_builtin (exp, target, &success);
15706 if (success)
15707 return ret;
15709 if (TARGET_PAIRED_FLOAT)
15711 ret = paired_expand_builtin (exp, target, &success);
15713 if (success)
15714 return ret;
15716 if (TARGET_HTM)
15718 ret = htm_expand_builtin (exp, target, &success);
15720 if (success)
15721 return ret;
15724 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
15725 /* RS6000_BTC_SPECIAL represents no-operand operators. */
15726 gcc_assert (attr == RS6000_BTC_UNARY
15727 || attr == RS6000_BTC_BINARY
15728 || attr == RS6000_BTC_TERNARY
15729 || attr == RS6000_BTC_SPECIAL);
15731 /* Handle simple unary operations. */
15732 d = bdesc_1arg;
15733 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15734 if (d->code == fcode)
15735 return rs6000_expand_unop_builtin (d->icode, exp, target);
15737 /* Handle simple binary operations. */
15738 d = bdesc_2arg;
15739 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15740 if (d->code == fcode)
15741 return rs6000_expand_binop_builtin (d->icode, exp, target);
15743 /* Handle simple ternary operations. */
15744 d = bdesc_3arg;
15745 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15746 if (d->code == fcode)
15747 return rs6000_expand_ternop_builtin (d->icode, exp, target);
15749 /* Handle simple no-argument operations. */
15750 d = bdesc_0arg;
15751 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
15752 if (d->code == fcode)
15753 return rs6000_expand_zeroop_builtin (d->icode, target);
15755 gcc_unreachable ();
15758 static void
15759 rs6000_init_builtins (void)
15761 tree tdecl;
15762 tree ftype;
15763 machine_mode mode;
15765 if (TARGET_DEBUG_BUILTIN)
15766 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
15767 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
15768 (TARGET_SPE) ? ", spe" : "",
15769 (TARGET_ALTIVEC) ? ", altivec" : "",
15770 (TARGET_VSX) ? ", vsx" : "");
15772 V2SI_type_node = build_vector_type (intSI_type_node, 2);
15773 V2SF_type_node = build_vector_type (float_type_node, 2);
15774 V2DI_type_node = build_vector_type (intDI_type_node, 2);
15775 V2DF_type_node = build_vector_type (double_type_node, 2);
15776 V4HI_type_node = build_vector_type (intHI_type_node, 4);
15777 V4SI_type_node = build_vector_type (intSI_type_node, 4);
15778 V4SF_type_node = build_vector_type (float_type_node, 4);
15779 V8HI_type_node = build_vector_type (intHI_type_node, 8);
15780 V16QI_type_node = build_vector_type (intQI_type_node, 16);
15782 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
15783 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
15784 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
15785 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
15787 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
15788 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
15789 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
15790 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
15792 const_str_type_node
15793 = build_pointer_type (build_qualified_type (char_type_node,
15794 TYPE_QUAL_CONST));
15796 /* We use V1TI mode as a special container to hold __int128_t items that
15797 must live in VSX registers. */
15798 if (intTI_type_node)
15800 V1TI_type_node = build_vector_type (intTI_type_node, 1);
15801 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
15804 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
15805 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
15806 'vector unsigned short'. */
15808 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
15809 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
15810 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
15811 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
15812 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
15814 long_integer_type_internal_node = long_integer_type_node;
15815 long_unsigned_type_internal_node = long_unsigned_type_node;
15816 long_long_integer_type_internal_node = long_long_integer_type_node;
15817 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
15818 intQI_type_internal_node = intQI_type_node;
15819 uintQI_type_internal_node = unsigned_intQI_type_node;
15820 intHI_type_internal_node = intHI_type_node;
15821 uintHI_type_internal_node = unsigned_intHI_type_node;
15822 intSI_type_internal_node = intSI_type_node;
15823 uintSI_type_internal_node = unsigned_intSI_type_node;
15824 intDI_type_internal_node = intDI_type_node;
15825 uintDI_type_internal_node = unsigned_intDI_type_node;
15826 intTI_type_internal_node = intTI_type_node;
15827 uintTI_type_internal_node = unsigned_intTI_type_node;
15828 float_type_internal_node = float_type_node;
15829 double_type_internal_node = double_type_node;
15830 long_double_type_internal_node = long_double_type_node;
15831 dfloat64_type_internal_node = dfloat64_type_node;
15832 dfloat128_type_internal_node = dfloat128_type_node;
15833 void_type_internal_node = void_type_node;
15835 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
15836 IFmode is the IBM extended 128-bit format that is a pair of doubles.
15837 TFmode will be either IEEE 128-bit floating point or the IBM double-double
15838 format that uses a pair of doubles, depending on the switches and
15839 defaults. */
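/* Illustrative note: once these nodes exist, __float128 always has
   KFmode (IEEE binary128) and __ibm128 always has IFmode
   (double-double), regardless of what long double is; e.g. under the
   IBM long double default, long double shares the double-double
   format while __float128 still provides true IEEE semantics.  */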
15840 if (TARGET_FLOAT128)
15842 ibm128_float_type_node = make_node (REAL_TYPE);
15843 TYPE_PRECISION (ibm128_float_type_node) = 128;
15844 layout_type (ibm128_float_type_node);
15845 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
15847 ieee128_float_type_node = make_node (REAL_TYPE);
15848 TYPE_PRECISION (ieee128_float_type_node) = 128;
15849 layout_type (ieee128_float_type_node);
15850 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
15852 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
15853 "__float128");
15855 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
15856 "__ibm128");
15858 else
15860 /* All types must be nonzero, or self-test barfs during bootstrap. */
15861 ieee128_float_type_node = long_double_type_node;
15862 ibm128_float_type_node = long_double_type_node;
15865 /* Initialize the modes for builtin_function_type, mapping a machine mode to
15866 tree type node. */
15867 builtin_mode_to_type[QImode][0] = integer_type_node;
15868 builtin_mode_to_type[HImode][0] = integer_type_node;
15869 builtin_mode_to_type[SImode][0] = intSI_type_node;
15870 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
15871 builtin_mode_to_type[DImode][0] = intDI_type_node;
15872 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
15873 builtin_mode_to_type[TImode][0] = intTI_type_node;
15874 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
15875 builtin_mode_to_type[SFmode][0] = float_type_node;
15876 builtin_mode_to_type[DFmode][0] = double_type_node;
15877 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
15878 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
15879 builtin_mode_to_type[TFmode][0] = long_double_type_node;
15880 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
15881 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
15882 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
15883 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
15884 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
15885 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
15886 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
15887 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
15888 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
15889 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
15890 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
15891 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
15892 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
15893 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
15894 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
15895 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
15896 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
15898 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
15899 TYPE_NAME (bool_char_type_node) = tdecl;
15901 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
15902 TYPE_NAME (bool_short_type_node) = tdecl;
15904 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
15905 TYPE_NAME (bool_int_type_node) = tdecl;
15907 tdecl = add_builtin_type ("__pixel", pixel_type_node);
15908 TYPE_NAME (pixel_type_node) = tdecl;
15910 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
15911 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
15912 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
15913 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
15914 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
15916 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
15917 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
15919 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
15920 TYPE_NAME (V16QI_type_node) = tdecl;
15922 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
15923 TYPE_NAME ( bool_V16QI_type_node) = tdecl;
15925 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
15926 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
15928 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
15929 TYPE_NAME (V8HI_type_node) = tdecl;
15931 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
15932 TYPE_NAME (bool_V8HI_type_node) = tdecl;
15934 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
15935 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
15937 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
15938 TYPE_NAME (V4SI_type_node) = tdecl;
15940 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
15941 TYPE_NAME (bool_V4SI_type_node) = tdecl;
15943 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
15944 TYPE_NAME (V4SF_type_node) = tdecl;
15946 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
15947 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
15949 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
15950 TYPE_NAME (V2DF_type_node) = tdecl;
15952 if (TARGET_POWERPC64)
15954 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
15955 TYPE_NAME (V2DI_type_node) = tdecl;
15957 tdecl = add_builtin_type ("__vector unsigned long",
15958 unsigned_V2DI_type_node);
15959 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
15961 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
15962 TYPE_NAME (bool_V2DI_type_node) = tdecl;
15964 else
15966 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
15967 TYPE_NAME (V2DI_type_node) = tdecl;
15969 tdecl = add_builtin_type ("__vector unsigned long long",
15970 unsigned_V2DI_type_node);
15971 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
15973 tdecl = add_builtin_type ("__vector __bool long long",
15974 bool_V2DI_type_node);
15975 TYPE_NAME (bool_V2DI_type_node) = tdecl;
15978 if (V1TI_type_node)
15980 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
15981 TYPE_NAME (V1TI_type_node) = tdecl;
15983 tdecl = add_builtin_type ("__vector unsigned __int128",
15984 unsigned_V1TI_type_node);
15985 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
15988 /* Paired and SPE builtins are only available if the compiler was built
15989 with the corresponding options, so create them only when the matching
15990 option is in effect. Create Altivec and VSX builtins on machines
15991 with at least the general purpose extensions (970 and newer) to allow the
15992 use of the target attribute. */
15993 if (TARGET_PAIRED_FLOAT)
15994 paired_init_builtins ();
15995 if (TARGET_SPE)
15996 spe_init_builtins ();
15997 if (TARGET_EXTRA_BUILTINS)
15998 altivec_init_builtins ();
15999 if (TARGET_HTM)
16000 htm_init_builtins ();
16002 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
16003 rs6000_common_init_builtins ();
16005 ftype = build_function_type_list (ieee128_float_type_node,
16006 const_str_type_node, NULL_TREE);
16007 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16008 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16010 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16011 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16012 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
16014 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16015 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16016 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16018 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16019 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16020 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16022 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16023 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16024 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16026 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16027 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16028 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16030 mode = (TARGET_64BIT) ? DImode : SImode;
16031 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16032 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16033 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16035 ftype = build_function_type_list (unsigned_intDI_type_node,
16036 NULL_TREE);
16037 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16039 if (TARGET_64BIT)
16040 ftype = build_function_type_list (unsigned_intDI_type_node,
16041 NULL_TREE);
16042 else
16043 ftype = build_function_type_list (unsigned_intSI_type_node,
16044 NULL_TREE);
16045 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16047 ftype = build_function_type_list (double_type_node, NULL_TREE);
16048 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16050 ftype = build_function_type_list (void_type_node,
16051 intSI_type_node, double_type_node,
16052 NULL_TREE);
16053 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16055 ftype = build_function_type_list (void_type_node, NULL_TREE);
16056 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16058 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16059 NULL_TREE);
16060 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16061 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16063 #if TARGET_XCOFF
16064 /* AIX libm provides clog as __clog. */
16065 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16066 set_user_assembler_name (tdecl, "__clog");
16067 #endif
16069 #ifdef SUBTARGET_INIT_BUILTINS
16070 SUBTARGET_INIT_BUILTINS;
16071 #endif
16074 /* Returns the rs6000 builtin decl for CODE. */
16076 static tree
16077 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16079 HOST_WIDE_INT fnmask;
16081 if (code >= RS6000_BUILTIN_COUNT)
16082 return error_mark_node;
16084 fnmask = rs6000_builtin_info[code].mask;
16085 if ((fnmask & rs6000_builtin_mask) != fnmask)
16087 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16088 return error_mark_node;
16091 return rs6000_builtin_decls[code];
16094 static void
16095 spe_init_builtins (void)
16097 tree puint_type_node = build_pointer_type (unsigned_type_node);
16098 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
16099 const struct builtin_description *d;
16100 size_t i;
16102 tree v2si_ftype_4_v2si
16103 = build_function_type_list (opaque_V2SI_type_node,
16104 opaque_V2SI_type_node,
16105 opaque_V2SI_type_node,
16106 opaque_V2SI_type_node,
16107 opaque_V2SI_type_node,
16108 NULL_TREE);
16110 tree v2sf_ftype_4_v2sf
16111 = build_function_type_list (opaque_V2SF_type_node,
16112 opaque_V2SF_type_node,
16113 opaque_V2SF_type_node,
16114 opaque_V2SF_type_node,
16115 opaque_V2SF_type_node,
16116 NULL_TREE);
16118 tree int_ftype_int_v2si_v2si
16119 = build_function_type_list (integer_type_node,
16120 integer_type_node,
16121 opaque_V2SI_type_node,
16122 opaque_V2SI_type_node,
16123 NULL_TREE);
16125 tree int_ftype_int_v2sf_v2sf
16126 = build_function_type_list (integer_type_node,
16127 integer_type_node,
16128 opaque_V2SF_type_node,
16129 opaque_V2SF_type_node,
16130 NULL_TREE);
16132 tree void_ftype_v2si_puint_int
16133 = build_function_type_list (void_type_node,
16134 opaque_V2SI_type_node,
16135 puint_type_node,
16136 integer_type_node,
16137 NULL_TREE);
16139 tree void_ftype_v2si_puint_char
16140 = build_function_type_list (void_type_node,
16141 opaque_V2SI_type_node,
16142 puint_type_node,
16143 char_type_node,
16144 NULL_TREE);
16146 tree void_ftype_v2si_pv2si_int
16147 = build_function_type_list (void_type_node,
16148 opaque_V2SI_type_node,
16149 opaque_p_V2SI_type_node,
16150 integer_type_node,
16151 NULL_TREE);
16153 tree void_ftype_v2si_pv2si_char
16154 = build_function_type_list (void_type_node,
16155 opaque_V2SI_type_node,
16156 opaque_p_V2SI_type_node,
16157 char_type_node,
16158 NULL_TREE);
16160 tree void_ftype_int
16161 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16163 tree int_ftype_void
16164 = build_function_type_list (integer_type_node, NULL_TREE);
16166 tree v2si_ftype_pv2si_int
16167 = build_function_type_list (opaque_V2SI_type_node,
16168 opaque_p_V2SI_type_node,
16169 integer_type_node,
16170 NULL_TREE);
16172 tree v2si_ftype_puint_int
16173 = build_function_type_list (opaque_V2SI_type_node,
16174 puint_type_node,
16175 integer_type_node,
16176 NULL_TREE);
16178 tree v2si_ftype_pushort_int
16179 = build_function_type_list (opaque_V2SI_type_node,
16180 pushort_type_node,
16181 integer_type_node,
16182 NULL_TREE);
16184 tree v2si_ftype_signed_char
16185 = build_function_type_list (opaque_V2SI_type_node,
16186 signed_char_type_node,
16187 NULL_TREE);
16189 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
16191 /* Initialize irregular SPE builtins. */
16193 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
16194 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
16195 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
16196 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
16197 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
16198 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
16199 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
16200 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
16201 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
16202 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
16203 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
16204 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
16205 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
16206 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
16207 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
16208 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
16209 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
16210 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
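/* For illustration (SPE targets only): the splat builtins replicate a
   small signed immediate into both 32-bit halves of an
   __ev64_opaque__ value, e.g.

     __ev64_opaque__ v = __builtin_spe_evsplati (3);  */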
16212 /* Loads. */
16213 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
16214 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
16215 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
16216 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
16217 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
16218 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
16219 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
16220 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
16221 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
16222 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
16223 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
16224 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
16225 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
16226 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
16227 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
16228 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
16229 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
16230 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
16231 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
16232 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
16233 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
16234 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
16236 /* Predicates. */
16237 d = bdesc_spe_predicates;
16238 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
16240 tree type;
16242 switch (insn_data[d->icode].operand[1].mode)
16244 case V2SImode:
16245 type = int_ftype_int_v2si_v2si;
16246 break;
16247 case V2SFmode:
16248 type = int_ftype_int_v2sf_v2sf;
16249 break;
16250 default:
16251 gcc_unreachable ();
16254 def_builtin (d->name, type, d->code);
16257 /* Evsel predicates. */
16258 d = bdesc_spe_evsel;
16259 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
16261 tree type;
16263 switch (insn_data[d->icode].operand[1].mode)
16265 case V2SImode:
16266 type = v2si_ftype_4_v2si;
16267 break;
16268 case V2SFmode:
16269 type = v2sf_ftype_4_v2sf;
16270 break;
16271 default:
16272 gcc_unreachable ();
16275 def_builtin (d->name, type, d->code);
16279 static void
16280 paired_init_builtins (void)
16282 const struct builtin_description *d;
16283 size_t i;
16285 tree int_ftype_int_v2sf_v2sf
16286 = build_function_type_list (integer_type_node,
16287 integer_type_node,
16288 V2SF_type_node,
16289 V2SF_type_node,
16290 NULL_TREE);
16291 tree pcfloat_type_node =
16292 build_pointer_type (build_qualified_type
16293 (float_type_node, TYPE_QUAL_CONST));
16295 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
16296 long_integer_type_node,
16297 pcfloat_type_node,
16298 NULL_TREE);
16299 tree void_ftype_v2sf_long_pcfloat =
16300 build_function_type_list (void_type_node,
16301 V2SF_type_node,
16302 long_integer_type_node,
16303 pcfloat_type_node,
16304 NULL_TREE);
16307 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
16308 PAIRED_BUILTIN_LX);
16311 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
16312 PAIRED_BUILTIN_STX);
16314 /* Predicates. */
16315 d = bdesc_paired_preds;
16316 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
16318 tree type;
16320 if (TARGET_DEBUG_BUILTIN)
16321 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
16322 (int)i, get_insn_name (d->icode), (int)d->icode,
16323 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
16325 switch (insn_data[d->icode].operand[1].mode)
16327 case V2SFmode:
16328 type = int_ftype_int_v2sf_v2sf;
16329 break;
16330 default:
16331 gcc_unreachable ();
16334 def_builtin (d->name, type, d->code);
16338 static void
16339 altivec_init_builtins (void)
16341 const struct builtin_description *d;
16342 size_t i;
16343 tree ftype;
16344 tree decl;
16346 tree pvoid_type_node = build_pointer_type (void_type_node);
16348 tree pcvoid_type_node
16349 = build_pointer_type (build_qualified_type (void_type_node,
16350 TYPE_QUAL_CONST));
16352 tree int_ftype_opaque
16353 = build_function_type_list (integer_type_node,
16354 opaque_V4SI_type_node, NULL_TREE);
16355 tree opaque_ftype_opaque
16356 = build_function_type_list (integer_type_node, NULL_TREE);
16357 tree opaque_ftype_opaque_int
16358 = build_function_type_list (opaque_V4SI_type_node,
16359 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16360 tree opaque_ftype_opaque_opaque_int
16361 = build_function_type_list (opaque_V4SI_type_node,
16362 opaque_V4SI_type_node, opaque_V4SI_type_node,
16363 integer_type_node, NULL_TREE);
16364 tree opaque_ftype_opaque_opaque_opaque
16365 = build_function_type_list (opaque_V4SI_type_node,
16366 opaque_V4SI_type_node, opaque_V4SI_type_node,
16367 opaque_V4SI_type_node, NULL_TREE);
16368 tree opaque_ftype_opaque_opaque
16369 = build_function_type_list (opaque_V4SI_type_node,
16370 opaque_V4SI_type_node, opaque_V4SI_type_node,
16371 NULL_TREE);
16372 tree int_ftype_int_opaque_opaque
16373 = build_function_type_list (integer_type_node,
16374 integer_type_node, opaque_V4SI_type_node,
16375 opaque_V4SI_type_node, NULL_TREE);
16376 tree int_ftype_int_v4si_v4si
16377 = build_function_type_list (integer_type_node,
16378 integer_type_node, V4SI_type_node,
16379 V4SI_type_node, NULL_TREE);
16380 tree int_ftype_int_v2di_v2di
16381 = build_function_type_list (integer_type_node,
16382 integer_type_node, V2DI_type_node,
16383 V2DI_type_node, NULL_TREE);
16384 tree void_ftype_v4si
16385 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16386 tree v8hi_ftype_void
16387 = build_function_type_list (V8HI_type_node, NULL_TREE);
16388 tree void_ftype_void
16389 = build_function_type_list (void_type_node, NULL_TREE);
16390 tree void_ftype_int
16391 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16393 tree opaque_ftype_long_pcvoid
16394 = build_function_type_list (opaque_V4SI_type_node,
16395 long_integer_type_node, pcvoid_type_node,
16396 NULL_TREE);
16397 tree v16qi_ftype_long_pcvoid
16398 = build_function_type_list (V16QI_type_node,
16399 long_integer_type_node, pcvoid_type_node,
16400 NULL_TREE);
16401 tree v8hi_ftype_long_pcvoid
16402 = build_function_type_list (V8HI_type_node,
16403 long_integer_type_node, pcvoid_type_node,
16404 NULL_TREE);
16405 tree v4si_ftype_long_pcvoid
16406 = build_function_type_list (V4SI_type_node,
16407 long_integer_type_node, pcvoid_type_node,
16408 NULL_TREE);
16409 tree v4sf_ftype_long_pcvoid
16410 = build_function_type_list (V4SF_type_node,
16411 long_integer_type_node, pcvoid_type_node,
16412 NULL_TREE);
16413 tree v2df_ftype_long_pcvoid
16414 = build_function_type_list (V2DF_type_node,
16415 long_integer_type_node, pcvoid_type_node,
16416 NULL_TREE);
16417 tree v2di_ftype_long_pcvoid
16418 = build_function_type_list (V2DI_type_node,
16419 long_integer_type_node, pcvoid_type_node,
16420 NULL_TREE);
16422 tree void_ftype_opaque_long_pvoid
16423 = build_function_type_list (void_type_node,
16424 opaque_V4SI_type_node, long_integer_type_node,
16425 pvoid_type_node, NULL_TREE);
16426 tree void_ftype_v4si_long_pvoid
16427 = build_function_type_list (void_type_node,
16428 V4SI_type_node, long_integer_type_node,
16429 pvoid_type_node, NULL_TREE);
16430 tree void_ftype_v16qi_long_pvoid
16431 = build_function_type_list (void_type_node,
16432 V16QI_type_node, long_integer_type_node,
16433 pvoid_type_node, NULL_TREE);
16434 tree void_ftype_v8hi_long_pvoid
16435 = build_function_type_list (void_type_node,
16436 V8HI_type_node, long_integer_type_node,
16437 pvoid_type_node, NULL_TREE);
16438 tree void_ftype_v4sf_long_pvoid
16439 = build_function_type_list (void_type_node,
16440 V4SF_type_node, long_integer_type_node,
16441 pvoid_type_node, NULL_TREE);
16442 tree void_ftype_v2df_long_pvoid
16443 = build_function_type_list (void_type_node,
16444 V2DF_type_node, long_integer_type_node,
16445 pvoid_type_node, NULL_TREE);
16446 tree void_ftype_v2di_long_pvoid
16447 = build_function_type_list (void_type_node,
16448 V2DI_type_node, long_integer_type_node,
16449 pvoid_type_node, NULL_TREE);
16450 tree int_ftype_int_v8hi_v8hi
16451 = build_function_type_list (integer_type_node,
16452 integer_type_node, V8HI_type_node,
16453 V8HI_type_node, NULL_TREE);
16454 tree int_ftype_int_v16qi_v16qi
16455 = build_function_type_list (integer_type_node,
16456 integer_type_node, V16QI_type_node,
16457 V16QI_type_node, NULL_TREE);
16458 tree int_ftype_int_v4sf_v4sf
16459 = build_function_type_list (integer_type_node,
16460 integer_type_node, V4SF_type_node,
16461 V4SF_type_node, NULL_TREE);
16462 tree int_ftype_int_v2df_v2df
16463 = build_function_type_list (integer_type_node,
16464 integer_type_node, V2DF_type_node,
16465 V2DF_type_node, NULL_TREE);
16466 tree v2di_ftype_v2di
16467 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16468 tree v4si_ftype_v4si
16469 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16470 tree v8hi_ftype_v8hi
16471 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16472 tree v16qi_ftype_v16qi
16473 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16474 tree v4sf_ftype_v4sf
16475 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16476 tree v2df_ftype_v2df
16477 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16478 tree void_ftype_pcvoid_int_int
16479 = build_function_type_list (void_type_node,
16480 pcvoid_type_node, integer_type_node,
16481 integer_type_node, NULL_TREE);
16483 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16484 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16485 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16486 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16487 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16488 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16489 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16490 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16491 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16492 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16493 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16494 ALTIVEC_BUILTIN_LVXL_V2DF);
16495 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16496 ALTIVEC_BUILTIN_LVXL_V2DI);
16497 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16498 ALTIVEC_BUILTIN_LVXL_V4SF);
16499 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16500 ALTIVEC_BUILTIN_LVXL_V4SI);
16501 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16502 ALTIVEC_BUILTIN_LVXL_V8HI);
16503 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16504 ALTIVEC_BUILTIN_LVXL_V16QI);
16505 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16506 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16507 ALTIVEC_BUILTIN_LVX_V2DF);
16508 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16509 ALTIVEC_BUILTIN_LVX_V2DI);
16510 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16511 ALTIVEC_BUILTIN_LVX_V4SF);
16512 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16513 ALTIVEC_BUILTIN_LVX_V4SI);
16514 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16515 ALTIVEC_BUILTIN_LVX_V8HI);
16516 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16517 ALTIVEC_BUILTIN_LVX_V16QI);
16518 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16519 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16520 ALTIVEC_BUILTIN_STVX_V2DF);
16521 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16522 ALTIVEC_BUILTIN_STVX_V2DI);
16523 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16524 ALTIVEC_BUILTIN_STVX_V4SF);
16525 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16526 ALTIVEC_BUILTIN_STVX_V4SI);
16527 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16528 ALTIVEC_BUILTIN_STVX_V8HI);
16529 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16530 ALTIVEC_BUILTIN_STVX_V16QI);
16531 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16532 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16533 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16534 ALTIVEC_BUILTIN_STVXL_V2DF);
16535 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16536 ALTIVEC_BUILTIN_STVXL_V2DI);
16537 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16538 ALTIVEC_BUILTIN_STVXL_V4SF);
16539 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16540 ALTIVEC_BUILTIN_STVXL_V4SI);
16541 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16542 ALTIVEC_BUILTIN_STVXL_V8HI);
16543 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16544 ALTIVEC_BUILTIN_STVXL_V16QI);
16545 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16546 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16547 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16548 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16549 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16550 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16551 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16552 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16553 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16554 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16555 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16556 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16557 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16558 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16559 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16560 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
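/* For illustration: the overloaded entry points above are what
   <altivec.h> maps vec_ld, vec_st and friends onto, e.g.

     vector signed int v = vec_ld (0, addr);   (__builtin_vec_ld)
     vec_st (v, 0, addr);                      (__builtin_vec_st)  */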
16562 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16563 VSX_BUILTIN_LXVD2X_V2DF);
16564 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16565 VSX_BUILTIN_LXVD2X_V2DI);
16566 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16567 VSX_BUILTIN_LXVW4X_V4SF);
16568 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16569 VSX_BUILTIN_LXVW4X_V4SI);
16570 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16571 VSX_BUILTIN_LXVW4X_V8HI);
16572 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16573 VSX_BUILTIN_LXVW4X_V16QI);
16574 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16575 VSX_BUILTIN_STXVD2X_V2DF);
16576 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16577 VSX_BUILTIN_STXVD2X_V2DI);
16578 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16579 VSX_BUILTIN_STXVW4X_V4SF);
16580 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16581 VSX_BUILTIN_STXVW4X_V4SI);
16582 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16583 VSX_BUILTIN_STXVW4X_V8HI);
16584 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16585 VSX_BUILTIN_STXVW4X_V16QI);
16587 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16588 VSX_BUILTIN_LD_ELEMREV_V2DF);
16589 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16590 VSX_BUILTIN_LD_ELEMREV_V2DI);
16591 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16592 VSX_BUILTIN_LD_ELEMREV_V4SF);
16593 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16594 VSX_BUILTIN_LD_ELEMREV_V4SI);
16595 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16596 VSX_BUILTIN_ST_ELEMREV_V2DF);
16597 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16598 VSX_BUILTIN_ST_ELEMREV_V2DI);
16599 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16600 VSX_BUILTIN_ST_ELEMREV_V4SF);
16601 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16602 VSX_BUILTIN_ST_ELEMREV_V4SI);
16604 if (TARGET_P9_VECTOR)
16606 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16607 VSX_BUILTIN_LD_ELEMREV_V8HI);
16608 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16609 VSX_BUILTIN_LD_ELEMREV_V16QI);
16610 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
16611 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
16612 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
16613 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
16616 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16617 VSX_BUILTIN_VEC_LD);
16618 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16619 VSX_BUILTIN_VEC_ST);
16620 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16621 VSX_BUILTIN_VEC_XL);
16622 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16623 VSX_BUILTIN_VEC_XST);
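/* For illustration: vec_xl / vec_xst from <altivec.h> resolve to the
   overloaded __builtin_vec_xl / __builtin_vec_xst above, which in turn
   can use the element-reversing loads and stores on little-endian VSX
   targets.  A sketch:

     vector double vd = vec_xl (0, pd);
     vec_xst (vd, 0, pd);  */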
16625 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16626 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16627 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16629 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16630 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16631 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16632 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16633 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16634 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16635 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16636 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16637 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16638 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16639 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16640 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16642 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16643 ALTIVEC_BUILTIN_VEC_ADDE);
16644 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16645 ALTIVEC_BUILTIN_VEC_ADDEC);
16646 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16647 ALTIVEC_BUILTIN_VEC_CMPNE);
16648 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16649 ALTIVEC_BUILTIN_VEC_MUL);
16651 /* Cell builtins. */
16652 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16653 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16654 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16655 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16657 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16658 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16659 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16660 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16662 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16663 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16664 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16665 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16667 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16668 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16669 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16670 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16672 /* Add the DST variants. */
16673 d = bdesc_dst;
16674 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16675 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16677 /* Initialize the predicates. */
16678 d = bdesc_altivec_preds;
16679 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16681 machine_mode mode1;
16682 tree type;
16684 if (rs6000_overloaded_builtin_p (d->code))
16685 mode1 = VOIDmode;
16686 else
16687 mode1 = insn_data[d->icode].operand[1].mode;
16689 switch (mode1)
16691 case VOIDmode:
16692 type = int_ftype_int_opaque_opaque;
16693 break;
16694 case V2DImode:
16695 type = int_ftype_int_v2di_v2di;
16696 break;
16697 case V4SImode:
16698 type = int_ftype_int_v4si_v4si;
16699 break;
16700 case V8HImode:
16701 type = int_ftype_int_v8hi_v8hi;
16702 break;
16703 case V16QImode:
16704 type = int_ftype_int_v16qi_v16qi;
16705 break;
16706 case V4SFmode:
16707 type = int_ftype_int_v4sf_v4sf;
16708 break;
16709 case V2DFmode:
16710 type = int_ftype_int_v2df_v2df;
16711 break;
16712 default:
16713 gcc_unreachable ();
16716 def_builtin (d->name, type, d->code);
16719 /* Initialize the abs* operators. */
16720 d = bdesc_abs;
16721 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16723 machine_mode mode0;
16724 tree type;
16726 mode0 = insn_data[d->icode].operand[0].mode;
16728 switch (mode0)
16730 case V2DImode:
16731 type = v2di_ftype_v2di;
16732 break;
16733 case V4SImode:
16734 type = v4si_ftype_v4si;
16735 break;
16736 case V8HImode:
16737 type = v8hi_ftype_v8hi;
16738 break;
16739 case V16QImode:
16740 type = v16qi_ftype_v16qi;
16741 break;
16742 case V4SFmode:
16743 type = v4sf_ftype_v4sf;
16744 break;
16745 case V2DFmode:
16746 type = v2df_ftype_v2df;
16747 break;
16748 default:
16749 gcc_unreachable ();
16752 def_builtin (d->name, type, d->code);
16755 /* Initialize target builtin that implements
16756 targetm.vectorize.builtin_mask_for_load. */
16758 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
16759 v16qi_ftype_long_pcvoid,
16760 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
16761 BUILT_IN_MD, NULL, NULL_TREE);
16762 TREE_READONLY (decl) = 1;
16763 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
16764 altivec_builtin_mask_for_load = decl;
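/* Roughly, the vectorizer expands a misaligned load as
   mask = __builtin_altivec_mask_for_load (addr) (an lvsl permute
   control) followed by a vperm that merges the two enclosing aligned
   loads into the desired unaligned value.  */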
16766 /* Access to the vec_init patterns. */
16767 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
16768 integer_type_node, integer_type_node,
16769 integer_type_node, NULL_TREE);
16770 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
16772 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
16773 short_integer_type_node,
16774 short_integer_type_node,
16775 short_integer_type_node,
16776 short_integer_type_node,
16777 short_integer_type_node,
16778 short_integer_type_node,
16779 short_integer_type_node, NULL_TREE);
16780 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
16782 ftype = build_function_type_list (V16QI_type_node, char_type_node,
16783 char_type_node, char_type_node,
16784 char_type_node, char_type_node,
16785 char_type_node, char_type_node,
16786 char_type_node, char_type_node,
16787 char_type_node, char_type_node,
16788 char_type_node, char_type_node,
16789 char_type_node, char_type_node,
16790 char_type_node, NULL_TREE);
16791 def_builtin ("__builtin_vec_init_v16qi", ftype,
16792 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
16794 ftype = build_function_type_list (V4SF_type_node, float_type_node,
16795 float_type_node, float_type_node,
16796 float_type_node, NULL_TREE);
16797 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
16799 /* VSX builtins. */
16800 ftype = build_function_type_list (V2DF_type_node, double_type_node,
16801 double_type_node, NULL_TREE);
16802 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
16804 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
16805 intDI_type_node, NULL_TREE);
16806 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
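/* For illustration, the vec_init builtins build a vector from scalar
   elements, e.g.

     vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);  */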
16808 /* Access to the vec_set patterns. */
16809 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
16810 intSI_type_node,
16811 integer_type_node, NULL_TREE);
16812 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
16814 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16815 intHI_type_node,
16816 integer_type_node, NULL_TREE);
16817 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
16819 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
16820 intQI_type_node,
16821 integer_type_node, NULL_TREE);
16822 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
16824 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
16825 float_type_node,
16826 integer_type_node, NULL_TREE);
16827 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
16829 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
16830 double_type_node,
16831 integer_type_node, NULL_TREE);
16832 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
16834 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
16835 intDI_type_node,
16836 integer_type_node, NULL_TREE);
16837 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
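/* For illustration, the vec_set builtins replace a single element; the
   final integer argument selects the lane, e.g.

     v = __builtin_vec_set_v4si (v, 42, 0);  */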
16839 /* Access to the vec_extract patterns. */
16840 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16841 integer_type_node, NULL_TREE);
16842 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
16844 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16845 integer_type_node, NULL_TREE);
16846 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
16848 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
16849 integer_type_node, NULL_TREE);
16850 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
16852 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16853 integer_type_node, NULL_TREE);
16854 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
16856 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16857 integer_type_node, NULL_TREE);
16858 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
16860 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
16861 integer_type_node, NULL_TREE);
16862 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
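/* For illustration, the vec_extract builtins read a single element
   back out, e.g.

     int e = __builtin_vec_ext_v4si (v, 2);  */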
16865 if (V1TI_type_node)
16867 tree v1ti_ftype_long_pcvoid
16868 = build_function_type_list (V1TI_type_node,
16869 long_integer_type_node, pcvoid_type_node,
16870 NULL_TREE);
16871 tree void_ftype_v1ti_long_pvoid
16872 = build_function_type_list (void_type_node,
16873 V1TI_type_node, long_integer_type_node,
16874 pvoid_type_node, NULL_TREE);
16875 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
16876 VSX_BUILTIN_LXVD2X_V1TI);
16877 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
16878 VSX_BUILTIN_STXVD2X_V1TI);
16879 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
16880 NULL_TREE, NULL_TREE);
16881 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
16882 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
16883 intTI_type_node,
16884 integer_type_node, NULL_TREE);
16885 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
16886 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
16887 integer_type_node, NULL_TREE);
16888 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
16893 static void
16894 htm_init_builtins (void)
16896 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16897 const struct builtin_description *d;
16898 size_t i;
16900 d = bdesc_htm;
16901 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
16903 tree op[MAX_HTM_OPERANDS], type;
16904 HOST_WIDE_INT mask = d->mask;
16905 unsigned attr = rs6000_builtin_info[d->code].attr;
16906 bool void_func = (attr & RS6000_BTC_VOID);
16907 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
16908 int nopnds = 0;
16909 tree gpr_type_node;
16910 tree rettype;
16911 tree argtype;
16913 if (TARGET_32BIT && TARGET_POWERPC64)
16914 gpr_type_node = long_long_unsigned_type_node;
16915 else
16916 gpr_type_node = long_unsigned_type_node;
16918 if (attr & RS6000_BTC_SPR)
16920 rettype = gpr_type_node;
16921 argtype = gpr_type_node;
16923 else if (d->code == HTM_BUILTIN_TABORTDC
16924 || d->code == HTM_BUILTIN_TABORTDCI)
16926 rettype = unsigned_type_node;
16927 argtype = gpr_type_node;
16929 else
16931 rettype = unsigned_type_node;
16932 argtype = unsigned_type_node;
16935 if ((mask & builtin_mask) != mask)
16937 if (TARGET_DEBUG_BUILTIN)
16938 	    fprintf (stderr, "htm_builtin, skip %s\n", d->name);
16939 continue;
16942 if (d->name == 0)
16944 if (TARGET_DEBUG_BUILTIN)
16945 	    fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
16946 (long unsigned) i);
16947 continue;
16950 op[nopnds++] = (void_func) ? void_type_node : rettype;
16952 if (attr_args == RS6000_BTC_UNARY)
16953 op[nopnds++] = argtype;
16954 else if (attr_args == RS6000_BTC_BINARY)
16956 op[nopnds++] = argtype;
16957 op[nopnds++] = argtype;
16959 else if (attr_args == RS6000_BTC_TERNARY)
16961 op[nopnds++] = argtype;
16962 op[nopnds++] = argtype;
16963 op[nopnds++] = argtype;
16966 switch (nopnds)
16968 case 1:
16969 type = build_function_type_list (op[0], NULL_TREE);
16970 break;
16971 case 2:
16972 type = build_function_type_list (op[0], op[1], NULL_TREE);
16973 break;
16974 case 3:
16975 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
16976 break;
16977 case 4:
16978 type = build_function_type_list (op[0], op[1], op[2], op[3],
16979 NULL_TREE);
16980 break;
16981 default:
16982 gcc_unreachable ();
16985 def_builtin (d->name, type, d->code);
16989 /* Hash function for builtin functions with up to 3 arguments and a return
16990 type. */
16991 hashval_t
16992 builtin_hasher::hash (builtin_hash_struct *bh)
16994 unsigned ret = 0;
16995 int i;
16997 for (i = 0; i < 4; i++)
16999 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17000 ret = (ret * 2) + bh->uns_p[i];
17003 return ret;
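/* Each iteration folds one mode/uns_p pair into the accumulator in
   mixed radix (MAX_MACHINE_MODE, 2), so two signatures hash alike only
   when all four modes and all four signedness flags agree.  */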
17006 /* Compare builtin hash entries H1 and H2 for equivalence. */
17007 bool
17008 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17010 return ((p1->mode[0] == p2->mode[0])
17011 && (p1->mode[1] == p2->mode[1])
17012 && (p1->mode[2] == p2->mode[2])
17013 && (p1->mode[3] == p2->mode[3])
17014 && (p1->uns_p[0] == p2->uns_p[0])
17015 && (p1->uns_p[1] == p2->uns_p[1])
17016 && (p1->uns_p[2] == p2->uns_p[2])
17017 && (p1->uns_p[3] == p2->uns_p[3]));
17020 /* Map types for builtin functions with an explicit return type and up to 3
17021 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17022    of the unused arguments.  */
17023 static tree
17024 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17025 machine_mode mode_arg1, machine_mode mode_arg2,
17026 enum rs6000_builtins builtin, const char *name)
17028 struct builtin_hash_struct h;
17029 struct builtin_hash_struct *h2;
17030 int num_args = 3;
17031 int i;
17032 tree ret_type = NULL_TREE;
17033 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17035 /* Create builtin_hash_table. */
17036 if (builtin_hash_table == NULL)
17037 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17039 h.type = NULL_TREE;
17040 h.mode[0] = mode_ret;
17041 h.mode[1] = mode_arg0;
17042 h.mode[2] = mode_arg1;
17043 h.mode[3] = mode_arg2;
17044 h.uns_p[0] = 0;
17045 h.uns_p[1] = 0;
17046 h.uns_p[2] = 0;
17047 h.uns_p[3] = 0;
17049 /* If the builtin is a type that produces unsigned results or takes unsigned
17050 arguments, and it is returned as a decl for the vectorizer (such as
17051 widening multiplies, permute), make sure the arguments and return value
17052 are type correct. */
17053 switch (builtin)
17055 /* unsigned 1 argument functions. */
17056 case CRYPTO_BUILTIN_VSBOX:
17057 case P8V_BUILTIN_VGBBD:
17058 case MISC_BUILTIN_CDTBCD:
17059 case MISC_BUILTIN_CBCDTD:
17060 h.uns_p[0] = 1;
17061 h.uns_p[1] = 1;
17062 break;
17064 /* unsigned 2 argument functions. */
17065 case ALTIVEC_BUILTIN_VMULEUB_UNS:
17066 case ALTIVEC_BUILTIN_VMULEUH_UNS:
17067 case ALTIVEC_BUILTIN_VMULOUB_UNS:
17068 case ALTIVEC_BUILTIN_VMULOUH_UNS:
17069 case CRYPTO_BUILTIN_VCIPHER:
17070 case CRYPTO_BUILTIN_VCIPHERLAST:
17071 case CRYPTO_BUILTIN_VNCIPHER:
17072 case CRYPTO_BUILTIN_VNCIPHERLAST:
17073 case CRYPTO_BUILTIN_VPMSUMB:
17074 case CRYPTO_BUILTIN_VPMSUMH:
17075 case CRYPTO_BUILTIN_VPMSUMW:
17076 case CRYPTO_BUILTIN_VPMSUMD:
17077 case CRYPTO_BUILTIN_VPMSUM:
17078 case MISC_BUILTIN_ADDG6S:
17079 case MISC_BUILTIN_DIVWEU:
17080 case MISC_BUILTIN_DIVWEUO:
17081 case MISC_BUILTIN_DIVDEU:
17082 case MISC_BUILTIN_DIVDEUO:
17083 h.uns_p[0] = 1;
17084 h.uns_p[1] = 1;
17085 h.uns_p[2] = 1;
17086 break;
17088 /* unsigned 3 argument functions. */
17089 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17090 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17091 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17092 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17093 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17094 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17095 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17096 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17097 case VSX_BUILTIN_VPERM_16QI_UNS:
17098 case VSX_BUILTIN_VPERM_8HI_UNS:
17099 case VSX_BUILTIN_VPERM_4SI_UNS:
17100 case VSX_BUILTIN_VPERM_2DI_UNS:
17101 case VSX_BUILTIN_XXSEL_16QI_UNS:
17102 case VSX_BUILTIN_XXSEL_8HI_UNS:
17103 case VSX_BUILTIN_XXSEL_4SI_UNS:
17104 case VSX_BUILTIN_XXSEL_2DI_UNS:
17105 case CRYPTO_BUILTIN_VPERMXOR:
17106 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17107 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17108 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17109 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17110 case CRYPTO_BUILTIN_VSHASIGMAW:
17111 case CRYPTO_BUILTIN_VSHASIGMAD:
17112 case CRYPTO_BUILTIN_VSHASIGMA:
17113 h.uns_p[0] = 1;
17114 h.uns_p[1] = 1;
17115 h.uns_p[2] = 1;
17116 h.uns_p[3] = 1;
17117 break;
17119 /* signed permute functions with unsigned char mask. */
17120 case ALTIVEC_BUILTIN_VPERM_16QI:
17121 case ALTIVEC_BUILTIN_VPERM_8HI:
17122 case ALTIVEC_BUILTIN_VPERM_4SI:
17123 case ALTIVEC_BUILTIN_VPERM_4SF:
17124 case ALTIVEC_BUILTIN_VPERM_2DI:
17125 case ALTIVEC_BUILTIN_VPERM_2DF:
17126 case VSX_BUILTIN_VPERM_16QI:
17127 case VSX_BUILTIN_VPERM_8HI:
17128 case VSX_BUILTIN_VPERM_4SI:
17129 case VSX_BUILTIN_VPERM_4SF:
17130 case VSX_BUILTIN_VPERM_2DI:
17131 case VSX_BUILTIN_VPERM_2DF:
17132 h.uns_p[3] = 1;
17133 break;
17135 /* unsigned args, signed return. */
17136 case VSX_BUILTIN_XVCVUXDDP_UNS:
17137 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17138 h.uns_p[1] = 1;
17139 break;
17141 /* signed args, unsigned return. */
17142 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17143 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17144 case MISC_BUILTIN_UNPACK_TD:
17145 case MISC_BUILTIN_UNPACK_V1TI:
17146 h.uns_p[0] = 1;
17147 break;
17149 /* unsigned arguments for 128-bit pack instructions. */
17150 case MISC_BUILTIN_PACK_TD:
17151 case MISC_BUILTIN_PACK_V1TI:
17152 h.uns_p[1] = 1;
17153 h.uns_p[2] = 1;
17154 break;
17156 default:
17157 break;
17160 /* Figure out how many args are present. */
17161 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17162 num_args--;
17164 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17165 if (!ret_type && h.uns_p[0])
17166 ret_type = builtin_mode_to_type[h.mode[0]][0];
17168 if (!ret_type)
17169 fatal_error (input_location,
17170 "internal error: builtin function %s had an unexpected "
17171 "return type %s", name, GET_MODE_NAME (h.mode[0]));
17173 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17174 arg_type[i] = NULL_TREE;
17176 for (i = 0; i < num_args; i++)
17178 int m = (int) h.mode[i+1];
17179 int uns_p = h.uns_p[i+1];
17181 arg_type[i] = builtin_mode_to_type[m][uns_p];
17182 if (!arg_type[i] && uns_p)
17183 arg_type[i] = builtin_mode_to_type[m][0];
17185 if (!arg_type[i])
17186 fatal_error (input_location,
17187 "internal error: builtin function %s, argument %d "
17188 "had unexpected argument type %s", name, i,
17189 GET_MODE_NAME (m));
17192 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17193 if (*found == NULL)
17195 h2 = ggc_alloc<builtin_hash_struct> ();
17196 *h2 = h;
17197 *found = h2;
17199 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17200 arg_type[2], NULL_TREE);
17203 return (*found)->type;
17206 static void
17207 rs6000_common_init_builtins (void)
17209 const struct builtin_description *d;
17210 size_t i;
17212 tree opaque_ftype_opaque = NULL_TREE;
17213 tree opaque_ftype_opaque_opaque = NULL_TREE;
17214 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17215 tree v2si_ftype = NULL_TREE;
17216 tree v2si_ftype_qi = NULL_TREE;
17217 tree v2si_ftype_v2si_qi = NULL_TREE;
17218 tree v2si_ftype_int_qi = NULL_TREE;
17219 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17221 if (!TARGET_PAIRED_FLOAT)
17223 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
17224 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
17227 /* Paired and SPE builtins are only available if you build a compiler with
17228 the appropriate options, so only create those builtins with the
17229 appropriate compiler option. Create Altivec and VSX builtins on machines
17230 with at least the general purpose extensions (970 and newer) to allow the
17231    use of the target attribute.  */
17233 if (TARGET_EXTRA_BUILTINS)
17234 builtin_mask |= RS6000_BTM_COMMON;
17236 /* Add the ternary operators. */
17237 d = bdesc_3arg;
17238 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17240 tree type;
17241 HOST_WIDE_INT mask = d->mask;
17243 if ((mask & builtin_mask) != mask)
17245 if (TARGET_DEBUG_BUILTIN)
17246 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17247 continue;
17250 if (rs6000_overloaded_builtin_p (d->code))
17252 if (! (type = opaque_ftype_opaque_opaque_opaque))
17253 type = opaque_ftype_opaque_opaque_opaque
17254 = build_function_type_list (opaque_V4SI_type_node,
17255 opaque_V4SI_type_node,
17256 opaque_V4SI_type_node,
17257 opaque_V4SI_type_node,
17258 NULL_TREE);
17260 else
17262 enum insn_code icode = d->icode;
17263 if (d->name == 0)
17265 if (TARGET_DEBUG_BUILTIN)
17266 	    fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17267 (long unsigned)i);
17269 continue;
17272 if (icode == CODE_FOR_nothing)
17274 if (TARGET_DEBUG_BUILTIN)
17275 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17276 d->name);
17278 continue;
17281 type = builtin_function_type (insn_data[icode].operand[0].mode,
17282 insn_data[icode].operand[1].mode,
17283 insn_data[icode].operand[2].mode,
17284 insn_data[icode].operand[3].mode,
17285 d->code, d->name);
17288 def_builtin (d->name, type, d->code);
17291 /* Add the binary operators. */
17292 d = bdesc_2arg;
17293 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17295 machine_mode mode0, mode1, mode2;
17296 tree type;
17297 HOST_WIDE_INT mask = d->mask;
17299 if ((mask & builtin_mask) != mask)
17301 if (TARGET_DEBUG_BUILTIN)
17302 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17303 continue;
17306 if (rs6000_overloaded_builtin_p (d->code))
17308 if (! (type = opaque_ftype_opaque_opaque))
17309 type = opaque_ftype_opaque_opaque
17310 = build_function_type_list (opaque_V4SI_type_node,
17311 opaque_V4SI_type_node,
17312 opaque_V4SI_type_node,
17313 NULL_TREE);
17315 else
17317 enum insn_code icode = d->icode;
17318 if (d->name == 0)
17320 if (TARGET_DEBUG_BUILTIN)
17321 	    fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17322 (long unsigned)i);
17324 continue;
17327 if (icode == CODE_FOR_nothing)
17329 if (TARGET_DEBUG_BUILTIN)
17330 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17331 d->name);
17333 continue;
17336 mode0 = insn_data[icode].operand[0].mode;
17337 mode1 = insn_data[icode].operand[1].mode;
17338 mode2 = insn_data[icode].operand[2].mode;
17340 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
17342 if (! (type = v2si_ftype_v2si_qi))
17343 type = v2si_ftype_v2si_qi
17344 = build_function_type_list (opaque_V2SI_type_node,
17345 opaque_V2SI_type_node,
17346 char_type_node,
17347 NULL_TREE);
17350 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
17351 && mode2 == QImode)
17353 if (! (type = v2si_ftype_int_qi))
17354 type = v2si_ftype_int_qi
17355 = build_function_type_list (opaque_V2SI_type_node,
17356 integer_type_node,
17357 char_type_node,
17358 NULL_TREE);
17361 else
17362 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17363 d->code, d->name);
17366 def_builtin (d->name, type, d->code);
17369 /* Add the simple unary operators. */
17370 d = bdesc_1arg;
17371 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17373 machine_mode mode0, mode1;
17374 tree type;
17375 HOST_WIDE_INT mask = d->mask;
17377 if ((mask & builtin_mask) != mask)
17379 if (TARGET_DEBUG_BUILTIN)
17380 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17381 continue;
17384 if (rs6000_overloaded_builtin_p (d->code))
17386 if (! (type = opaque_ftype_opaque))
17387 type = opaque_ftype_opaque
17388 = build_function_type_list (opaque_V4SI_type_node,
17389 opaque_V4SI_type_node,
17390 NULL_TREE);
17392 else
17394 enum insn_code icode = d->icode;
17395 if (d->name == 0)
17397 if (TARGET_DEBUG_BUILTIN)
17398 	    fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17399 (long unsigned)i);
17401 continue;
17404 if (icode == CODE_FOR_nothing)
17406 if (TARGET_DEBUG_BUILTIN)
17407 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17408 d->name);
17410 continue;
17413 mode0 = insn_data[icode].operand[0].mode;
17414 mode1 = insn_data[icode].operand[1].mode;
17416 if (mode0 == V2SImode && mode1 == QImode)
17418 if (! (type = v2si_ftype_qi))
17419 type = v2si_ftype_qi
17420 = build_function_type_list (opaque_V2SI_type_node,
17421 char_type_node,
17422 NULL_TREE);
17425 else
17426 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17427 d->code, d->name);
17430 def_builtin (d->name, type, d->code);
17433 /* Add the simple no-argument operators. */
17434 d = bdesc_0arg;
17435 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17437 machine_mode mode0;
17438 tree type;
17439 HOST_WIDE_INT mask = d->mask;
17441 if ((mask & builtin_mask) != mask)
17443 if (TARGET_DEBUG_BUILTIN)
17444 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17445 continue;
17447 if (rs6000_overloaded_builtin_p (d->code))
17449 if (!opaque_ftype_opaque)
17450 opaque_ftype_opaque
17451 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17452 type = opaque_ftype_opaque;
17454 else
17456 enum insn_code icode = d->icode;
17457 if (d->name == 0)
17459 if (TARGET_DEBUG_BUILTIN)
17460 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17461 (long unsigned) i);
17462 continue;
17464 if (icode == CODE_FOR_nothing)
17466 if (TARGET_DEBUG_BUILTIN)
17467 fprintf (stderr,
17468 "rs6000_builtin, skip no-argument %s (no code)\n",
17469 d->name);
17470 continue;
17472 mode0 = insn_data[icode].operand[0].mode;
17473 if (mode0 == V2SImode)
17475 /* code for SPE */
17476 if (! (type = v2si_ftype))
17478 v2si_ftype
17479 = build_function_type_list (opaque_V2SI_type_node,
17480 NULL_TREE);
17481 type = v2si_ftype;
17484 else
17485 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17486 d->code, d->name);
17488 def_builtin (d->name, type, d->code);
17492 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17493 static void
17494 init_float128_ibm (machine_mode mode)
17496 if (!TARGET_XL_COMPAT)
17498 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17499 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17500 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17501 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17503 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
17505 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17506 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17507 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17508 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17509 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17510 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17511 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17513 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17514 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17515 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17516 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17517 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17518 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17519 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17520 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17523 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
17524 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17526 else
17528 set_optab_libfunc (add_optab, mode, "_xlqadd");
17529 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17530 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17531 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17534 /* Add various conversions for IFmode to use the traditional TFmode
17535 names. */
17536 if (mode == IFmode)
17538 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
17539 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
17540 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
17541 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
17542 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
17543 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
17545 if (TARGET_POWERPC64)
17547 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17548 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17549 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17550 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
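/* For illustration: once these are registered, an IBM extended double
   addition such as

     long double f (long double a, long double b) { return a + b; }

   lowers to a call to __gcc_qadd (or _xlqadd with -mxl-compat); a
   sketch of the effect, not a guaranteed code sequence.  */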
17555 /* Set up IEEE 128-bit floating point routines. Use different names if the
17556 arguments can be passed in a vector register. The historical PowerPC
17557 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17558 continue to use that if we aren't using vector registers to pass IEEE
17559 128-bit floating point. */
17561 static void
17562 init_float128_ieee (machine_mode mode)
17564 if (FLOAT128_VECTOR_P (mode))
17566 set_optab_libfunc (add_optab, mode, "__addkf3");
17567 set_optab_libfunc (sub_optab, mode, "__subkf3");
17568 set_optab_libfunc (neg_optab, mode, "__negkf2");
17569 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17570 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17571 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17572       set_optab_libfunc (abs_optab, mode, "__abskf2");
17574 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17575 set_optab_libfunc (ne_optab, mode, "__nekf2");
17576 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17577 set_optab_libfunc (ge_optab, mode, "__gekf2");
17578 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17579 set_optab_libfunc (le_optab, mode, "__lekf2");
17580 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17582 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17583 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17584 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17585 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17587 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
17588 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17589 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
17591 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
17592 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17593 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
17595 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
17596 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
17597 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
17598 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
17599 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
17600 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
17602 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17603 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17604 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17605 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17607 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17608 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17609 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17610 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17612 if (TARGET_POWERPC64)
17614 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17615 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17616 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17617 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17621 else
17623 set_optab_libfunc (add_optab, mode, "_q_add");
17624 set_optab_libfunc (sub_optab, mode, "_q_sub");
17625 set_optab_libfunc (neg_optab, mode, "_q_neg");
17626 set_optab_libfunc (smul_optab, mode, "_q_mul");
17627 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17628 if (TARGET_PPC_GPOPT)
17629 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17631 set_optab_libfunc (eq_optab, mode, "_q_feq");
17632 set_optab_libfunc (ne_optab, mode, "_q_fne");
17633 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17634 set_optab_libfunc (ge_optab, mode, "_q_fge");
17635 set_optab_libfunc (lt_optab, mode, "_q_flt");
17636 set_optab_libfunc (le_optab, mode, "_q_fle");
17638 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17639 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17640 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17641 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17642 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17643 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17644 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17645 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17649 static void
17650 rs6000_init_libfuncs (void)
17652 /* __float128 support. */
17653 if (TARGET_FLOAT128)
17655 init_float128_ibm (IFmode);
17656 init_float128_ieee (KFmode);
17659 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17660 if (TARGET_LONG_DOUBLE_128)
17662 if (!TARGET_IEEEQUAD)
17663 init_float128_ibm (TFmode);
17665 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17666 else
17667 init_float128_ieee (TFmode);
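/* As an illustration of the mappings above (an example, not from the
   original sources): on a target where __float128 is passed in vector
   registers,

       __float128 f128_add (__float128 a, __float128 b) { return a + b; }

   expands to a call to __addkf3, while the same addition under the
   historical non-vector ABI would call _q_add instead.  */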
17672 /* Expand a block clear operation, and return 1 if successful. Return 0
17673 if we should let the compiler generate normal code.
17675 operands[0] is the destination
17676 operands[1] is the length
17677 operands[3] is the alignment */
17679 int
17680 expand_block_clear (rtx operands[])
17682 rtx orig_dest = operands[0];
17683 rtx bytes_rtx = operands[1];
17684 rtx align_rtx = operands[3];
17685 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
17686 HOST_WIDE_INT align;
17687 HOST_WIDE_INT bytes;
17688 int offset;
17689 int clear_bytes;
17690 int clear_step;
17692 /* If this is not a fixed size clear, just call memset */
17693 if (! constp)
17694 return 0;
17696 /* This must be a fixed size alignment */
17697 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
17698 align = INTVAL (align_rtx) * BITS_PER_UNIT;
17700 /* Anything to clear? */
17701 bytes = INTVAL (bytes_rtx);
17702 if (bytes <= 0)
17703 return 1;
17705 /* Use the builtin memset after a point, to avoid huge code bloat.
17706 When optimize_size, avoid any significant code bloat; calling
17707 memset is about 4 instructions, so allow for one instruction to
17708 load zero and three to do clearing. */
17709 if (TARGET_ALTIVEC && align >= 128)
17710 clear_step = 16;
17711 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
17712 clear_step = 8;
17713 else if (TARGET_SPE && align >= 64)
17714 clear_step = 8;
17715 else
17716 clear_step = 4;
17718 if (optimize_size && bytes > 3 * clear_step)
17719 return 0;
17720 if (! optimize_size && bytes > 8 * clear_step)
17721 return 0;
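/* A worked example of the limits above (illustrative): with
   TARGET_POWERPC64 and doubleword alignment, clear_step is 8, so up to
   8 * 8 = 64 bytes are cleared inline at full optimization, while
   optimize_size falls back to memset beyond 3 * 8 = 24 bytes.  */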
17723 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
17725 machine_mode mode = BLKmode;
17726 rtx dest;
17728 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
17730 clear_bytes = 16;
17731 mode = V4SImode;
17733 else if (bytes >= 8 && TARGET_SPE && align >= 64)
17735 clear_bytes = 8;
17736 mode = V2SImode;
17738 else if (bytes >= 8 && TARGET_POWERPC64
17739 && (align >= 64 || !STRICT_ALIGNMENT))
17741 clear_bytes = 8;
17742 mode = DImode;
17743 if (offset == 0 && align < 64)
17745 rtx addr;
17747 /* If the address form is reg+offset with offset not a
17748 multiple of four, reload into reg indirect form here
17749 rather than waiting for reload. This way we get one
17750 reload, not one per store. */
17751 addr = XEXP (orig_dest, 0);
17752 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17753 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17754 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17756 addr = copy_addr_to_reg (addr);
17757 orig_dest = replace_equiv_address (orig_dest, addr);
17761 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
17762 { /* move 4 bytes */
17763 clear_bytes = 4;
17764 mode = SImode;
17766 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
17767 { /* move 2 bytes */
17768 clear_bytes = 2;
17769 mode = HImode;
17771 else /* move 1 byte at a time */
17773 clear_bytes = 1;
17774 mode = QImode;
17777 dest = adjust_address (orig_dest, mode, offset);
17779 emit_move_insn (dest, CONST0_RTX (mode));
17782 return 1;
17786 /* Expand a block move operation, and return 1 if successful. Return 0
17787 if we should let the compiler generate normal code.
17789 operands[0] is the destination
17790 operands[1] is the source
17791 operands[2] is the length
17792 operands[3] is the alignment */
17794 #define MAX_MOVE_REG 4
17796 int
17797 expand_block_move (rtx operands[])
17799 rtx orig_dest = operands[0];
17800 rtx orig_src = operands[1];
17801 rtx bytes_rtx = operands[2];
17802 rtx align_rtx = operands[3];
17803 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
17804 int align;
17805 int bytes;
17806 int offset;
17807 int move_bytes;
17808 rtx stores[MAX_MOVE_REG];
17809 int num_reg = 0;
17811 /* If this is not a fixed size move, just call memcpy */
17812 if (! constp)
17813 return 0;
17815 /* This must be a fixed size alignment */
17816 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
17817 align = INTVAL (align_rtx) * BITS_PER_UNIT;
17819 /* Anything to move? */
17820 bytes = INTVAL (bytes_rtx);
17821 if (bytes <= 0)
17822 return 1;
17824 if (bytes > rs6000_block_move_inline_limit)
17825 return 0;
17827 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
17829 union {
17830 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
17831 rtx (*mov) (rtx, rtx);
17832 } gen_func;
17833 machine_mode mode = BLKmode;
17834 rtx src, dest;
17836 /* Altivec first, since it will be faster than a string move
17837 when it applies, and usually not significantly larger. */
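/* E.g. (illustrative): a 32-byte copy with 16-byte alignment becomes two
   V4SImode register moves here, while the same copy with only word
   alignment falls through to the string insns or to a sequence of
   DImode/SImode moves below.  */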
17838 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
17840 move_bytes = 16;
17841 mode = V4SImode;
17842 gen_func.mov = gen_movv4si;
17844 else if (TARGET_SPE && bytes >= 8 && align >= 64)
17846 move_bytes = 8;
17847 mode = V2SImode;
17848 gen_func.mov = gen_movv2si;
17850 else if (TARGET_STRING
17851 && bytes > 24 /* move up to 32 bytes at a time */
17852 && ! fixed_regs[5]
17853 && ! fixed_regs[6]
17854 && ! fixed_regs[7]
17855 && ! fixed_regs[8]
17856 && ! fixed_regs[9]
17857 && ! fixed_regs[10]
17858 && ! fixed_regs[11]
17859 && ! fixed_regs[12])
17861 move_bytes = (bytes > 32) ? 32 : bytes;
17862 gen_func.movmemsi = gen_movmemsi_8reg;
17864 else if (TARGET_STRING
17865 && bytes > 16 /* move up to 24 bytes at a time */
17866 && ! fixed_regs[5]
17867 && ! fixed_regs[6]
17868 && ! fixed_regs[7]
17869 && ! fixed_regs[8]
17870 && ! fixed_regs[9]
17871 && ! fixed_regs[10])
17873 move_bytes = (bytes > 24) ? 24 : bytes;
17874 gen_func.movmemsi = gen_movmemsi_6reg;
17876 else if (TARGET_STRING
17877 && bytes > 8 /* move up to 16 bytes at a time */
17878 && ! fixed_regs[5]
17879 && ! fixed_regs[6]
17880 && ! fixed_regs[7]
17881 && ! fixed_regs[8])
17883 move_bytes = (bytes > 16) ? 16 : bytes;
17884 gen_func.movmemsi = gen_movmemsi_4reg;
17886 else if (bytes >= 8 && TARGET_POWERPC64
17887 && (align >= 64 || !STRICT_ALIGNMENT))
17889 move_bytes = 8;
17890 mode = DImode;
17891 gen_func.mov = gen_movdi;
17892 if (offset == 0 && align < 64)
17894 rtx addr;
17896 /* If the address form is reg+offset with offset not a
17897 multiple of four, reload into reg indirect form here
17898 rather than waiting for reload. This way we get one
17899 reload, not one per load and/or store. */
17900 addr = XEXP (orig_dest, 0);
17901 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17902 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17903 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17905 addr = copy_addr_to_reg (addr);
17906 orig_dest = replace_equiv_address (orig_dest, addr);
17908 addr = XEXP (orig_src, 0);
17909 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
17910 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17911 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
17913 addr = copy_addr_to_reg (addr);
17914 orig_src = replace_equiv_address (orig_src, addr);
17918 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
17919 { /* move up to 8 bytes at a time */
17920 move_bytes = (bytes > 8) ? 8 : bytes;
17921 gen_func.movmemsi = gen_movmemsi_2reg;
17923 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
17924 { /* move 4 bytes */
17925 move_bytes = 4;
17926 mode = SImode;
17927 gen_func.mov = gen_movsi;
17929 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
17930 { /* move 2 bytes */
17931 move_bytes = 2;
17932 mode = HImode;
17933 gen_func.mov = gen_movhi;
17935 else if (TARGET_STRING && bytes > 1)
17936 { /* move up to 4 bytes at a time */
17937 move_bytes = (bytes > 4) ? 4 : bytes;
17938 gen_func.movmemsi = gen_movmemsi_1reg;
17940 else /* move 1 byte at a time */
17942 move_bytes = 1;
17943 mode = QImode;
17944 gen_func.mov = gen_movqi;
17947 src = adjust_address (orig_src, mode, offset);
17948 dest = adjust_address (orig_dest, mode, offset);
17950 if (mode != BLKmode)
17952 rtx tmp_reg = gen_reg_rtx (mode);
17954 emit_insn ((*gen_func.mov) (tmp_reg, src));
17955 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
17958 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
17960 int i;
17961 for (i = 0; i < num_reg; i++)
17962 emit_insn (stores[i]);
17963 num_reg = 0;
17966 if (mode == BLKmode)
17968 /* Move the address into scratch registers. The movmemsi
17969 patterns require zero offset. */
17970 if (!REG_P (XEXP (src, 0)))
17972 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
17973 src = replace_equiv_address (src, src_reg);
17975 set_mem_size (src, move_bytes);
17977 if (!REG_P (XEXP (dest, 0)))
17979 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
17980 dest = replace_equiv_address (dest, dest_reg);
17982 set_mem_size (dest, move_bytes);
17984 emit_insn ((*gen_func.movmemsi) (dest, src,
17985 GEN_INT (move_bytes & 31),
17986 align_rtx));
17990 return 1;
17994 /* Return a string to perform a load_multiple operation.
17995 operands[0] is the vector.
17996 operands[1] is the source address.
17997 operands[2] is the first destination register. */
17999 const char *
18000 rs6000_output_load_multiple (rtx operands[3])
18002 /* We have to handle the case where the pseudo used to contain the address
18003 is assigned to one of the output registers. */
18004 int i, j;
18005 int words = XVECLEN (operands[0], 0);
18006 rtx xop[10];
18008 if (XVECLEN (operands[0], 0) == 1)
18009 return "lwz %2,0(%1)";
18011 for (i = 0; i < words; i++)
18012 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
18014 if (i == words-1)
18016 xop[0] = GEN_INT (4 * (words-1));
18017 xop[1] = operands[1];
18018 xop[2] = operands[2];
18019 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
18020 return "";
18022 else if (i == 0)
18024 xop[0] = GEN_INT (4 * (words-1));
18025 xop[1] = operands[1];
18026 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
18027 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
18028 return "";
18030 else
18032 for (j = 0; j < words; j++)
18033 if (j != i)
18035 xop[0] = GEN_INT (j * 4);
18036 xop[1] = operands[1];
18037 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
18038 output_asm_insn ("lwz %2,%0(%1)", xop);
18040 xop[0] = GEN_INT (i * 4);
18041 xop[1] = operands[1];
18042 output_asm_insn ("lwz %1,%0(%1)", xop);
18043 return "";
18047 return "lswi %2,%1,%N0";
18051 /* A validation routine: say whether CODE, a condition code, and MODE
18052 match. The other alternatives either don't make sense or should
18053 never be generated. */
18055 void
18056 validate_condition_mode (enum rtx_code code, machine_mode mode)
18058 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18059 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18060 && GET_MODE_CLASS (mode) == MODE_CC);
18062 /* These don't make sense. */
18063 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18064 || mode != CCUNSmode);
18066 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18067 || mode == CCUNSmode);
18069 gcc_assert (mode == CCFPmode
18070 || (code != ORDERED && code != UNORDERED
18071 && code != UNEQ && code != LTGT
18072 && code != UNGT && code != UNLT
18073 && code != UNGE && code != UNLE));
18075 /* These should never be generated except for
18076 flag_finite_math_only. */
18077 gcc_assert (mode != CCFPmode
18078 || flag_finite_math_only
18079 || (code != LE && code != GE
18080 && code != UNEQ && code != LTGT
18081 && code != UNGT && code != UNLT));
18083 /* These are invalid; the information is not there. */
18084 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18088 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18089 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18090 not zero, store there the bit offset (counted from the right) where
18091 the single stretch of 1 bits begins; and similarly for B, the bit
18092 offset where it ends. */
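/* For example (illustrative): in SImode, MASK = 0x00ffff00 is a single
   stretch of ones in bits 8..23, so this returns true with *E = 8 and
   *B = 23; MASK = 0x00ff00ff has two stretches and is rejected, while a
   wrap-around mask such as 0xff0000ff is still accepted here.  */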
18094 bool
18095 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18097 unsigned HOST_WIDE_INT val = INTVAL (mask);
18098 unsigned HOST_WIDE_INT bit;
18099 int nb, ne;
18100 int n = GET_MODE_PRECISION (mode);
18102 if (mode != DImode && mode != SImode)
18103 return false;
18105 if (INTVAL (mask) >= 0)
18107 bit = val & -val;
18108 ne = exact_log2 (bit);
18109 nb = exact_log2 (val + bit);
18111 else if (val + 1 == 0)
18113 nb = n;
18114 ne = 0;
18116 else if (val & 1)
18118 val = ~val;
18119 bit = val & -val;
18120 nb = exact_log2 (bit);
18121 ne = exact_log2 (val + bit);
18123 else
18125 bit = val & -val;
18126 ne = exact_log2 (bit);
18127 if (val + bit == 0)
18128 nb = n;
18129 else
18130 nb = 0;
18133 nb--;
18135 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18136 return false;
18138 if (b)
18139 *b = nb;
18140 if (e)
18141 *e = ne;
18143 return true;
18146 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18147 or rldicr instruction, to implement an AND with it in mode MODE. */
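/* E.g. (illustrative): in DImode, 0x00000000ffffffff (ne == 0) can be
   done with rldicl and 0xffffffff00000000 (nb == 63) with rldicr, but
   0x0000ffff00000000 cannot be done by a single rldicl, rldicr, or
   rlwinm and is rejected.  */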
18149 bool
18150 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18152 int nb, ne;
18154 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18155 return false;
18157 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18158 does not wrap. */
18159 if (mode == DImode)
18160 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18162 /* For SImode, rlwinm can do everything. */
18163 if (mode == SImode)
18164 return (nb < 32 && ne < 32);
18166 return false;
18169 /* Return the instruction template for an AND with mask in mode MODE, with
18170 operands OPERANDS. If DOT is true, make it a record-form instruction. */
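/* E.g. (illustrative): an AND with 0xffffffff in DImode has ne == 0 and
   nb == 31, so the first case below emits "rldicl %0,%1,0,32", the
   familiar zero-extend idiom.  */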
18172 const char *
18173 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18175 int nb, ne;
18177 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18178 gcc_unreachable ();
18180 if (mode == DImode && ne == 0)
18182 operands[3] = GEN_INT (63 - nb);
18183 if (dot)
18184 return "rldicl. %0,%1,0,%3";
18185 return "rldicl %0,%1,0,%3";
18188 if (mode == DImode && nb == 63)
18190 operands[3] = GEN_INT (63 - ne);
18191 if (dot)
18192 return "rldicr. %0,%1,0,%3";
18193 return "rldicr %0,%1,0,%3";
18196 if (nb < 32 && ne < 32)
18198 operands[3] = GEN_INT (31 - nb);
18199 operands[4] = GEN_INT (31 - ne);
18200 if (dot)
18201 return "rlwinm. %0,%1,0,%3,%4";
18202 return "rlwinm %0,%1,0,%3,%4";
18205 gcc_unreachable ();
18208 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18209 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18210 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
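/* A worked example (illustrative): in SImode, (x << 8) & 0xffffff00 has
   sh == 8, ne == 8, nb == 31; the ASHIFT check passes (ne >= sh) and the
   rlw* test succeeds, so a single rlwinm can implement it.  */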
18212 bool
18213 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18215 int nb, ne;
18217 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18218 return false;
18220 int n = GET_MODE_PRECISION (mode);
18221 int sh = -1;
18223 if (CONST_INT_P (XEXP (shift, 1)))
18225 sh = INTVAL (XEXP (shift, 1));
18226 if (sh < 0 || sh >= n)
18227 return false;
18230 rtx_code code = GET_CODE (shift);
18232 /* Convert any shift by 0 to a rotate, to simplify below code. */
18233 if (sh == 0)
18234 code = ROTATE;
18236 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18237 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18238 code = ASHIFT;
18239 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18241 code = LSHIFTRT;
18242 sh = n - sh;
18245 /* DImode rotates need rld*. */
18246 if (mode == DImode && code == ROTATE)
18247 return (nb == 63 || ne == 0 || ne == sh);
18249 /* SImode rotates need rlw*. */
18250 if (mode == SImode && code == ROTATE)
18251 return (nb < 32 && ne < 32 && sh < 32);
18253 /* Wrap-around masks are only okay for rotates. */
18254 if (ne > nb)
18255 return false;
18257 /* Variable shifts are only okay for rotates. */
18258 if (sh < 0)
18259 return false;
18261 /* Don't allow ASHIFT if the mask is wrong for that. */
18262 if (code == ASHIFT && ne < sh)
18263 return false;
18265 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18266 if the mask is wrong for that. */
18267 if (nb < 32 && ne < 32 && sh < 32
18268 && !(code == LSHIFTRT && nb >= 32 - sh))
18269 return true;
18271 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18272 if the mask is wrong for that. */
18273 if (code == LSHIFTRT)
18274 sh = 64 - sh;
18275 if (nb == 63 || ne == 0 || ne == sh)
18276 return !(code == LSHIFTRT && nb >= sh);
18278 return false;
18281 /* Return the instruction template for a shift with mask in mode MODE, with
18282 operands OPERANDS. If DOT is true, make it a record-form instruction. */
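/* E.g. (illustrative): a DImode LSHIFTRT by 16 under the mask
   0x0000ffffffffffff has ne == 0 and nb == 47, so the first case below
   turns the shift count into 64 - 16 = 48 and emits
   "rldicl %0,%1,48,16", i.e. the canonical srdi %0,%1,16.  */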
18284 const char *
18285 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18287 int nb, ne;
18289 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18290 gcc_unreachable ();
18292 if (mode == DImode && ne == 0)
18294 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18295 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18296 operands[3] = GEN_INT (63 - nb);
18297 if (dot)
18298 return "rld%I2cl. %0,%1,%2,%3";
18299 return "rld%I2cl %0,%1,%2,%3";
18302 if (mode == DImode && nb == 63)
18304 operands[3] = GEN_INT (63 - ne);
18305 if (dot)
18306 return "rld%I2cr. %0,%1,%2,%3";
18307 return "rld%I2cr %0,%1,%2,%3";
18310 if (mode == DImode
18311 && GET_CODE (operands[4]) != LSHIFTRT
18312 && CONST_INT_P (operands[2])
18313 && ne == INTVAL (operands[2]))
18315 operands[3] = GEN_INT (63 - nb);
18316 if (dot)
18317 return "rld%I2c. %0,%1,%2,%3";
18318 return "rld%I2c %0,%1,%2,%3";
18321 if (nb < 32 && ne < 32)
18323 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18324 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18325 operands[3] = GEN_INT (31 - nb);
18326 operands[4] = GEN_INT (31 - ne);
18327 /* This insn can also be a 64-bit rotate with mask that really makes
18328 it just a shift right (with mask); the %h below are to adjust for
18329 that situation (shift count is >= 32 in that case). */
18330 if (dot)
18331 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18332 return "rlw%I2nm %0,%1,%h2,%3,%4";
18335 gcc_unreachable ();
18338 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18339 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18340 ASHIFT, or LSHIFTRT) in mode MODE. */
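/* E.g. (illustrative): inserting a byte at bits 8..15, an ASHIFT by 8
   under the mask 0xff00, gives sh == 8, ne == 8, nb == 15 in SImode; the
   rlwimi test (nb, ne, sh all < 32, ne >= sh) passes, and in DImode the
   rldimi test would pass as well since ne == sh.  */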
18342 bool
18343 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18345 int nb, ne;
18347 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18348 return false;
18350 int n = GET_MODE_PRECISION (mode);
18352 int sh = INTVAL (XEXP (shift, 1));
18353 if (sh < 0 || sh >= n)
18354 return false;
18356 rtx_code code = GET_CODE (shift);
18358 /* Convert any shift by 0 to a rotate, to simplify below code. */
18359 if (sh == 0)
18360 code = ROTATE;
18362 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18363 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18364 code = ASHIFT;
18365 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18367 code = LSHIFTRT;
18368 sh = n - sh;
18371 /* DImode rotates need rldimi. */
18372 if (mode == DImode && code == ROTATE)
18373 return (ne == sh);
18375 /* SImode rotates need rlwimi. */
18376 if (mode == SImode && code == ROTATE)
18377 return (nb < 32 && ne < 32 && sh < 32);
18379 /* Wrap-around masks are only okay for rotates. */
18380 if (ne > nb)
18381 return false;
18383 /* Don't allow ASHIFT if the mask is wrong for that. */
18384 if (code == ASHIFT && ne < sh)
18385 return false;
18387 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18388 if the mask is wrong for that. */
18389 if (nb < 32 && ne < 32 && sh < 32
18390 && !(code == LSHIFTRT && nb >= 32 - sh))
18391 return true;
18393 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18394 if the mask is wrong for that. */
18395 if (code == LSHIFTRT)
18396 sh = 64 - sh;
18397 if (ne == sh)
18398 return !(code == LSHIFTRT && nb >= sh);
18400 return false;
18403 /* Return the instruction template for an insert with mask in mode MODE, with
18404 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18406 const char *
18407 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18409 int nb, ne;
18411 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18412 gcc_unreachable ();
18414 /* Prefer rldimi because rlwimi is cracked. */
18415 if (TARGET_POWERPC64
18416 && (!dot || mode == DImode)
18417 && GET_CODE (operands[4]) != LSHIFTRT
18418 && ne == INTVAL (operands[2]))
18420 operands[3] = GEN_INT (63 - nb);
18421 if (dot)
18422 return "rldimi. %0,%1,%2,%3";
18423 return "rldimi %0,%1,%2,%3";
18426 if (nb < 32 && ne < 32)
18428 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18429 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18430 operands[3] = GEN_INT (31 - nb);
18431 operands[4] = GEN_INT (31 - ne);
18432 if (dot)
18433 return "rlwimi. %0,%1,%2,%3,%4";
18434 return "rlwimi %0,%1,%2,%3,%4";
18437 gcc_unreachable ();
18440 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18441 using two machine instructions. */
18443 bool
18444 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18446 /* There are two kinds of AND we can handle with two insns:
18447 1) those we can do with two rl* insns;
18448 2) ori[s];xori[s].
18450 We do not handle that last case yet. */
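/* A worked example (illustrative): c = 0x0f00ff00 in SImode has two
   stretches of ones separated by a hole at bits 16..23.  Filling the
   hole gives mask2 = 0x0fffff00, and the remainder is mask1 = 0xff00ffff;
   both are valid rlwinm masks, so this is a valid two-insn AND.  */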
18452 /* If there is just one stretch of ones, we can do it. */
18453 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18454 return true;
18456 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18457 one insn, we can do the whole thing with two. */
18458 unsigned HOST_WIDE_INT val = INTVAL (c);
18459 unsigned HOST_WIDE_INT bit1 = val & -val;
18460 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18461 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18462 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18463 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18466 /* Emit a potentially record-form instruction, setting DST from SRC.
18467 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18468 signed comparison of DST with zero. If DOT is 1, the generated RTL
18469 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18470 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18471 a separate COMPARE. */
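/* For example (illustrative): with DOT == 1 and CCREG being CR0, the
   emitted RTL is

       (parallel [(set ccreg (compare:CC src (const_int 0)))
                  (clobber dst)])

   which matches the record-form ("dot") patterns in rs6000.md.  */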
18473 static void
18474 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18476 if (dot == 0)
18478 emit_move_insn (dst, src);
18479 return;
18482 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18484 emit_move_insn (dst, src);
18485 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18486 return;
18489 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18490 if (dot == 1)
18492 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18493 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18495 else
18497 rtx set = gen_rtx_SET (dst, src);
18498 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18502 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18503 If EXPAND is true, split rotate-and-mask instructions we generate to
18504 their constituent parts as well (this is used during expand); if DOT
18505 is 1, make the last insn a record-form instruction clobbering the
18506 destination GPR and setting the CC reg (from operands[3]); if 2, set
18507 that GPR as well as the CC reg. */
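/* Continuing the 0x0f00ff00 example (illustrative): the SImode path
   below produces "rlwinm %0,%1,0,16,7" (AND with mask1 = 0xff00ffff)
   followed by "rlwinm %0,%1,0,4,23" (AND with mask2 = 0x0fffff00), the
   second insn optionally in record form.  */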
18509 void
18510 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18512 gcc_assert (!(expand && dot));
18514 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18516 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18517 shift right. This generates better code than doing the masks without
18518 shifts, or shifting first right and then left. */
18519 int nb, ne;
18520 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18522 gcc_assert (mode == DImode);
18524 int shift = 63 - nb;
18525 if (expand)
18527 rtx tmp1 = gen_reg_rtx (DImode);
18528 rtx tmp2 = gen_reg_rtx (DImode);
18529 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18530 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18531 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18533 else
18535 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18536 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18537 emit_move_insn (operands[0], tmp);
18538 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18539 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18541 return;
18544 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18545 that does the rest. */
18546 unsigned HOST_WIDE_INT bit1 = val & -val;
18547 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18548 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18549 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18551 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18552 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18554 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18556 /* Two "no-rotate"-and-mask instructions, for SImode. */
18557 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18559 gcc_assert (mode == SImode);
18561 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18562 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18563 emit_move_insn (reg, tmp);
18564 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18565 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18566 return;
18569 gcc_assert (mode == DImode);
18571 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18572 insns; we have to do the first in SImode, because it wraps. */
18573 if (mask2 <= 0xffffffff
18574 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18576 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18577 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18578 GEN_INT (mask1));
18579 rtx reg_low = gen_lowpart (SImode, reg);
18580 emit_move_insn (reg_low, tmp);
18581 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18582 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18583 return;
18586 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18587 at the top end), rotate back and clear the other hole. */
18588 int right = exact_log2 (bit3);
18589 int left = 64 - right;
18591 /* Rotate the mask too. */
18592 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18594 if (expand)
18596 rtx tmp1 = gen_reg_rtx (DImode);
18597 rtx tmp2 = gen_reg_rtx (DImode);
18598 rtx tmp3 = gen_reg_rtx (DImode);
18599 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18600 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18601 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18602 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18604 else
18606 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18607 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18608 emit_move_insn (operands[0], tmp);
18609 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18610 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18611 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18615 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18616 for lfq and stfq insns iff the registers are hard registers. */
18618 int
18619 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18621 /* We might have been passed a SUBREG. */
18622 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18623 return 0;
18625 /* We might have been passed non floating point registers. */
18626 if (!FP_REGNO_P (REGNO (reg1))
18627 || !FP_REGNO_P (REGNO (reg2)))
18628 return 0;
18630 return (REGNO (reg1) == REGNO (reg2) - 1);
18633 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18634 addr1 and addr2 must be in consecutive memory locations
18635 (addr2 == addr1 + 8). */
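/* E.g. (illustrative): mem1 at (plus r3 8) and mem2 at (plus r3 16)
   qualify (same base register, offset2 == offset1 + 8), while mem1 at
   (reg r3) and mem2 at (plus r4 8) do not, since the bases differ.  */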
18637 int
18638 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18640 rtx addr1, addr2;
18641 unsigned int reg1, reg2;
18642 int offset1, offset2;
18644 /* The mems cannot be volatile. */
18645 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18646 return 0;
18648 addr1 = XEXP (mem1, 0);
18649 addr2 = XEXP (mem2, 0);
18651 /* Extract an offset (if used) from the first addr. */
18652 if (GET_CODE (addr1) == PLUS)
18654 /* If not a REG, return zero. */
18655 if (GET_CODE (XEXP (addr1, 0)) != REG)
18656 return 0;
18657 else
18659 reg1 = REGNO (XEXP (addr1, 0));
18660 /* The offset must be constant! */
18661 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18662 return 0;
18663 offset1 = INTVAL (XEXP (addr1, 1));
18666 else if (GET_CODE (addr1) != REG)
18667 return 0;
18668 else
18670 reg1 = REGNO (addr1);
18671 /* This was a simple (mem (reg)) expression. Offset is 0. */
18672 offset1 = 0;
18675 /* And now for the second addr. */
18676 if (GET_CODE (addr2) == PLUS)
18678 /* If not a REG, return zero. */
18679 if (GET_CODE (XEXP (addr2, 0)) != REG)
18680 return 0;
18681 else
18683 reg2 = REGNO (XEXP (addr2, 0));
18684 /* The offset must be constant. */
18685 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18686 return 0;
18687 offset2 = INTVAL (XEXP (addr2, 1));
18690 else if (GET_CODE (addr2) != REG)
18691 return 0;
18692 else
18694 reg2 = REGNO (addr2);
18695 /* This was a simple (mem (reg)) expression. Offset is 0. */
18696 offset2 = 0;
18699 /* Both of these must have the same base register. */
18700 if (reg1 != reg2)
18701 return 0;
18703 /* The offset for the second addr must be 8 more than the first addr. */
18704 if (offset2 != offset1 + 8)
18705 return 0;
18707 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18708 instructions. */
18709 return 1;
18713 rtx
18714 rs6000_secondary_memory_needed_rtx (machine_mode mode)
18716 static bool eliminated = false;
18717 rtx ret;
18719 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
18720 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18721 else
18723 rtx mem = cfun->machine->sdmode_stack_slot;
18724 gcc_assert (mem != NULL_RTX);
18726 if (!eliminated)
18728 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
18729 cfun->machine->sdmode_stack_slot = mem;
18730 eliminated = true;
18732 ret = mem;
18735 if (TARGET_DEBUG_ADDR)
18737 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
18738 GET_MODE_NAME (mode));
18739 if (!ret)
18740 fprintf (stderr, "\tNULL_RTX\n");
18741 else
18742 debug_rtx (ret);
18745 return ret;
18748 /* Return the mode to be used for memory when a secondary memory
18749 location is needed. For SDmode values we need to use DDmode, in
18750 all other cases we can use the same mode. */
18751 machine_mode
18752 rs6000_secondary_memory_needed_mode (machine_mode mode)
18754 if (lra_in_progress && mode == SDmode)
18755 return DDmode;
18756 return mode;
18759 static tree
18760 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
18762 /* Don't walk into types. */
18763 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
18765 *walk_subtrees = 0;
18766 return NULL_TREE;
18769 switch (TREE_CODE (*tp))
18771 case VAR_DECL:
18772 case PARM_DECL:
18773 case FIELD_DECL:
18774 case RESULT_DECL:
18775 case SSA_NAME:
18776 case REAL_CST:
18777 case MEM_REF:
18778 case VIEW_CONVERT_EXPR:
18779 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
18780 return *tp;
18781 break;
18782 default:
18783 break;
18786 return NULL_TREE;
18789 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18790 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18791 only work on the traditional altivec registers, note if an altivec register
18792 was chosen. */
18794 static enum rs6000_reg_type
18795 register_to_reg_type (rtx reg, bool *is_altivec)
18797 HOST_WIDE_INT regno;
18798 enum reg_class rclass;
18800 if (GET_CODE (reg) == SUBREG)
18801 reg = SUBREG_REG (reg);
18803 if (!REG_P (reg))
18804 return NO_REG_TYPE;
18806 regno = REGNO (reg);
18807 if (regno >= FIRST_PSEUDO_REGISTER)
18809 if (!lra_in_progress && !reload_in_progress && !reload_completed)
18810 return PSEUDO_REG_TYPE;
18812 regno = true_regnum (reg);
18813 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18814 return PSEUDO_REG_TYPE;
18817 gcc_assert (regno >= 0);
18819 if (is_altivec && ALTIVEC_REGNO_P (regno))
18820 *is_altivec = true;
18822 rclass = rs6000_regno_regclass[regno];
18823 return reg_class_to_reg_type[(int)rclass];
18826 /* Helper function to return the cost of adding a TOC entry address. */
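/* I.e. (illustrative): 1 or 2 extra insns under the medium and large
   code models, and 3 (or 6 with -mminimal-toc) under the small code
   model.  */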
18828 static inline int
18829 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18831 int ret;
18833 if (TARGET_CMODEL != CMODEL_SMALL)
18834 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18836 else
18837 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18839 return ret;
18842 /* Helper function for rs6000_secondary_reload to determine whether the memory
18843 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18844 needs reloading. Return negative if the memory is not handled by the memory
18845 helper functions and a different reload method should be tried, 0 if no
18846 additional instructions are needed, and positive to give the extra cost for the
18847 memory. */
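/* For example (illustrative): reloading a PRE_INC address into a
   register class whose addr_mask lacks RELOAD_REG_PRE_INCDEC costs one
   extra insn (the explicit update), and an Altivec-style AND -16
   address reloaded into a non-Altivec class costs 1 or 2 depending on
   whether the inner address is reg or reg+reg.  */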
18849 static int
18850 rs6000_secondary_reload_memory (rtx addr,
18851 enum reg_class rclass,
18852 machine_mode mode)
18854 int extra_cost = 0;
18855 rtx reg, and_arg, plus_arg0, plus_arg1;
18856 addr_mask_type addr_mask;
18857 const char *type = NULL;
18858 const char *fail_msg = NULL;
18860 if (GPR_REG_CLASS_P (rclass))
18861 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18863 else if (rclass == FLOAT_REGS)
18864 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18866 else if (rclass == ALTIVEC_REGS)
18867 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18869 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18870 else if (rclass == VSX_REGS)
18871 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18872 & ~RELOAD_REG_AND_M16);
18874 /* If the register allocator hasn't made up its mind yet on the register
18875 class to use, settle on defaults. */
18876 else if (rclass == NO_REGS)
18878 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18879 & ~RELOAD_REG_AND_M16);
18881 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18882 addr_mask &= ~(RELOAD_REG_INDEXED
18883 | RELOAD_REG_PRE_INCDEC
18884 | RELOAD_REG_PRE_MODIFY);
18887 else
18888 addr_mask = 0;
18890 /* If the register isn't valid in this register class, just return now. */
18891 if ((addr_mask & RELOAD_REG_VALID) == 0)
18893 if (TARGET_DEBUG_ADDR)
18895 fprintf (stderr,
18896 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18897 "not valid in class\n",
18898 GET_MODE_NAME (mode), reg_class_names[rclass]);
18899 debug_rtx (addr);
18902 return -1;
18905 switch (GET_CODE (addr))
18907 /* Does the register class support auto update forms for this mode? We
18908 don't need a scratch register, since the powerpc only supports
18909 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18910 case PRE_INC:
18911 case PRE_DEC:
18912 reg = XEXP (addr, 0);
18913 if (!base_reg_operand (reg, GET_MODE (reg)))
18915 fail_msg = "no base register #1";
18916 extra_cost = -1;
18919 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18921 extra_cost = 1;
18922 type = "update";
18924 break;
18926 case PRE_MODIFY:
18927 reg = XEXP (addr, 0);
18928 plus_arg1 = XEXP (addr, 1);
18929 if (!base_reg_operand (reg, GET_MODE (reg))
18930 || GET_CODE (plus_arg1) != PLUS
18931 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18933 fail_msg = "bad PRE_MODIFY";
18934 extra_cost = -1;
18937 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18939 extra_cost = 1;
18940 type = "update";
18942 break;
18944 /* Do we need to simulate AND -16 to clear the bottom address bits used
18945 in VMX load/stores? Only allow the AND for vector sizes. */
18946 case AND:
18947 and_arg = XEXP (addr, 0);
18948 if (GET_MODE_SIZE (mode) != 16
18949 || GET_CODE (XEXP (addr, 1)) != CONST_INT
18950 || INTVAL (XEXP (addr, 1)) != -16)
18952 fail_msg = "bad Altivec AND #1";
18953 extra_cost = -1;
18956 if (rclass != ALTIVEC_REGS)
18958 if (legitimate_indirect_address_p (and_arg, false))
18959 extra_cost = 1;
18961 else if (legitimate_indexed_address_p (and_arg, false))
18962 extra_cost = 2;
18964 else
18966 fail_msg = "bad Altivec AND #2";
18967 extra_cost = -1;
18970 type = "and";
18972 break;
18974 /* If this is an indirect address, make sure it is a base register. */
18975 case REG:
18976 case SUBREG:
18977 if (!legitimate_indirect_address_p (addr, false))
18979 extra_cost = 1;
18980 type = "move";
18982 break;
18984 /* If this is an indexed address, make sure the register class can handle
18985 indexed addresses for this mode. */
18986 case PLUS:
18987 plus_arg0 = XEXP (addr, 0);
18988 plus_arg1 = XEXP (addr, 1);
18990 /* (plus (plus (reg) (constant)) (constant)) is generated during
18991 push_reload processing, so handle it now. */
18992 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18994 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18996 extra_cost = 1;
18997 type = "offset";
19001 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19002 push_reload processing, so handle it now. */
19003 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19005 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19007 extra_cost = 1;
19008 type = "indexed #2";
19012 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19014 fail_msg = "no base register #2";
19015 extra_cost = -1;
19018 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19020 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19021 || !legitimate_indexed_address_p (addr, false))
19023 extra_cost = 1;
19024 type = "indexed";
19028 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19029 && CONST_INT_P (plus_arg1))
19031 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19033 extra_cost = 1;
19034 type = "vector d-form offset";
19038 /* Make sure the register class can handle offset addresses. */
19039 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19041 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19043 extra_cost = 1;
19044 type = "offset #2";
19048 else
19050 fail_msg = "bad PLUS";
19051 extra_cost = -1;
19054 break;
19056 case LO_SUM:
19057 /* Quad offsets are restricted and can't handle normal addresses. */
19058 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19060 extra_cost = -1;
19061 type = "vector d-form lo_sum";
19064 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19066 fail_msg = "bad LO_SUM";
19067 extra_cost = -1;
19070 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19072 extra_cost = 1;
19073 type = "lo_sum";
19075 break;
19077 /* Static addresses need to create a TOC entry. */
19078 case CONST:
19079 case SYMBOL_REF:
19080 case LABEL_REF:
19081 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19083 extra_cost = -1;
19084 type = "vector d-form lo_sum #2";
19087 else
19089 type = "address";
19090 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19092 break;
19094 /* TOC references look like offsettable memory. */
19095 case UNSPEC:
19096 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19098 fail_msg = "bad UNSPEC";
19099 extra_cost = -1;
19102 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19104 extra_cost = -1;
19105 type = "vector d-form lo_sum #3";
19108 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19110 extra_cost = 1;
19111 type = "toc reference";
19113 break;
19115 default:
19117 fail_msg = "bad address";
19118 extra_cost = -1;
19122 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19124 if (extra_cost < 0)
19125 fprintf (stderr,
19126 "rs6000_secondary_reload_memory error: mode = %s, "
19127 "class = %s, addr_mask = '%s', %s\n",
19128 GET_MODE_NAME (mode),
19129 reg_class_names[rclass],
19130 rs6000_debug_addr_mask (addr_mask, false),
19131 (fail_msg != NULL) ? fail_msg : "<bad address>");
19133 else
19134 fprintf (stderr,
19135 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19136 "addr_mask = '%s', extra cost = %d, %s\n",
19137 GET_MODE_NAME (mode),
19138 reg_class_names[rclass],
19139 rs6000_debug_addr_mask (addr_mask, false),
19140 extra_cost,
19141 (type) ? type : "<none>");
19143 debug_rtx (addr);
19146 return extra_cost;
19149 /* Helper function for rs6000_secondary_reload to return true if a move to a
19150 different register class is really a simple move. */
19152 static bool
19153 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19154 enum rs6000_reg_type from_type,
19155 machine_mode mode)
19157 int size;
19159 /* Add support for various direct moves available. In this function, we only
19160 look at cases where we don't need any extra registers, and one or more
19161 simple move insns are issued. At present, 32-bit integers are not allowed
19162 in FPR/VSX registers. Single precision binary floating point is not a simple
19163 move because we need to convert to the single precision memory layout.
19164 The 4-byte SDmode can be moved. */
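/* E.g. (illustrative): a DImode move between a GPR and a VSX register
   on a 64-bit direct-move target is a simple move (one mtvsrd or
   mfvsrd), whereas an SFmode move is not, because of the memory-format
   conversion just mentioned.  */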
19165 size = GET_MODE_SIZE (mode);
19166 if (TARGET_DIRECT_MOVE
19167 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
19168 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19169 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19170 return true;
19172 else if (TARGET_DIRECT_MOVE_128 && size == 16
19173 && ((to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19174 || (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)))
19175 return true;
19177 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19178 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19179 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19180 return true;
19182 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19183 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19184 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19185 return true;
19187 return false;
19190 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19191 special direct moves that involve allocating an extra register. Return true
19192 if there is such a move, storing the insn code of the helper function and
19193 its extra cost in SRI; return false if not. */
19195 static bool
19196 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19197 enum rs6000_reg_type from_type,
19198 machine_mode mode,
19199 secondary_reload_info *sri,
19200 bool altivec_p)
19202 bool ret = false;
19203 enum insn_code icode = CODE_FOR_nothing;
19204 int cost = 0;
19205 int size = GET_MODE_SIZE (mode);
19207 if (TARGET_POWERPC64)
19209 if (size == 16)
19211 /* Handle moving 128-bit values from GPRs to VSX registers on
19212 ISA 2.07 (power8, power9) when running in 64-bit mode using
19213 XXPERMDI to glue the two 64-bit values back together. */
19214 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19216 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19217 icode = reg_addr[mode].reload_vsx_gpr;
19220 /* Handle moving 128-bit values from VSX registers to GPRs on
19221 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19222 bottom 64-bit value. */
19223 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19225 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19226 icode = reg_addr[mode].reload_gpr_vsx;
19230 else if (mode == SFmode)
19232 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19234 cost = 3; /* xscvdpspn, mfvsrd, and. */
19235 icode = reg_addr[mode].reload_gpr_vsx;
19238 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19240 cost = 2; /* mtvsrz, xscvspdpn. */
19241 icode = reg_addr[mode].reload_vsx_gpr;
19246 if (TARGET_POWERPC64 && size == 16)
19248 /* Handle moving 128-bit values from GPRs to VSX registers on
19249 ISA 2.07 when running in 64-bit mode using XXPERMDI to glue the two
19250 64-bit values back together. */
19251 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19253 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19254 icode = reg_addr[mode].reload_vsx_gpr;
19257 /* Handle moving 128-bit values from VSX registers to GPRs on
19258 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19259 bottom 64-bit value. */
19260 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19262 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19263 icode = reg_addr[mode].reload_gpr_vsx;
19267 else if (!TARGET_POWERPC64 && size == 8)
19269 /* Handle moving 64-bit values from GPRs to floating point registers on
19270 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19271 32-bit values back together. Altivec register classes must be handled
19272 specially since a different instruction is used, and the secondary
19273 reload support requires a single instruction class in the scratch
19274 register constraint. However, right now TFmode is not allowed in
19275 Altivec registers, so the pattern will never match. */
19276 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19278 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19279 icode = reg_addr[mode].reload_fpr_gpr;
19283 if (icode != CODE_FOR_nothing)
19285 ret = true;
19286 if (sri)
19288 sri->icode = icode;
19289 sri->extra_cost = cost;
19293 return ret;
19296 /* Return whether a move between two register classes can be done either
19297 directly (simple move) or via a pattern that uses a single extra temporary
19298 (using ISA 2.07's direct move in this case). */
19300 static bool
19301 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19302 enum rs6000_reg_type from_type,
19303 machine_mode mode,
19304 secondary_reload_info *sri,
19305 bool altivec_p)
19307 /* Fall back to load/store reloads if either type is not a register. */
19308 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19309 return false;
19311 /* If we haven't allocated registers yet, assume the move can be done for the
19312 standard register types. */
19313 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19314 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19315 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19316 return true;
19318 /* A move within the same set of registers is a simple move for
19319 non-specialized registers. */
19320 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19321 return true;
19323 /* Check whether a simple move can be done directly. */
19324 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19326 if (sri)
19328 sri->icode = CODE_FOR_nothing;
19329 sri->extra_cost = 0;
19331 return true;
19334 /* Now check if we can do it in a few steps. */
19335 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19336 altivec_p);
19339 /* Inform reload about cases where moving X with a mode MODE to a register in
19340 RCLASS requires an extra scratch or immediate register. Return the class
19341 needed for the immediate register.
19343 For VSX and Altivec, we may need a register to convert sp+offset into
19344 reg+sp.
19346 For misaligned 64-bit gpr loads and stores we need a register to
19347 convert an offset address to indirect. */
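/* For example (illustrative): a misaligned DImode access such as
   "ld r3,6(r9)" is not a valid DS-form insn, so this hook returns
   NO_REGS and fills in SRI with CODE_FOR_reload_di_load (or the SImode
   variant for -m32 -mpowerpc64) plus an extra cost of 2, letting reload
   build the address in a scratch register.  */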
19349 static reg_class_t
19350 rs6000_secondary_reload (bool in_p,
19351 rtx x,
19352 reg_class_t rclass_i,
19353 machine_mode mode,
19354 secondary_reload_info *sri)
19356 enum reg_class rclass = (enum reg_class) rclass_i;
19357 reg_class_t ret = ALL_REGS;
19358 enum insn_code icode;
19359 bool default_p = false;
19360 bool done_p = false;
19362 /* Allow subreg of memory before/during reload. */
19363 bool memory_p = (MEM_P (x)
19364 || (!reload_completed && GET_CODE (x) == SUBREG
19365 && MEM_P (SUBREG_REG (x))));
19367 sri->icode = CODE_FOR_nothing;
19368 sri->extra_cost = 0;
19369 icode = ((in_p)
19370 ? reg_addr[mode].reload_load
19371 : reg_addr[mode].reload_store);
19373 if (REG_P (x) || register_operand (x, mode))
19375 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19376 bool altivec_p = (rclass == ALTIVEC_REGS);
19377 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19379 if (!in_p)
19381 enum rs6000_reg_type exchange = to_type;
19382 to_type = from_type;
19383 from_type = exchange;
19386 /* Can we do a direct move of some sort? */
19387 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19388 altivec_p))
19390 icode = (enum insn_code)sri->icode;
19391 default_p = false;
19392 done_p = true;
19393 ret = NO_REGS;
19397 /* Make sure 0.0 is not reloaded or forced into memory. */
19398 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19400 ret = NO_REGS;
19401 default_p = false;
19402 done_p = true;
19405 /* If this is a scalar floating point value and we want to load it into the
19406 traditional Altivec registers, do it via a move via a traditional floating
19407 point register, unless we have D-form addressing. Also make sure that
19408 non-zero constants use a FPR. */
19409 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19410 && !mode_supports_vmx_dform (mode)
19411 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19412 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19414 ret = FLOAT_REGS;
19415 default_p = false;
19416 done_p = true;
19419 /* Handle reload of load/stores if we have reload helper functions. */
19420 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19422 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19423 mode);
19425 if (extra_cost >= 0)
19427 done_p = true;
19428 ret = NO_REGS;
19429 if (extra_cost > 0)
19431 sri->extra_cost = extra_cost;
19432 sri->icode = icode;
19437 /* Handle unaligned loads and stores of integer registers. */
19438 if (!done_p && TARGET_POWERPC64
19439 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19440 && memory_p
19441 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19443 rtx addr = XEXP (x, 0);
19444 rtx off = address_offset (addr);
19446 if (off != NULL_RTX)
19448 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19449 unsigned HOST_WIDE_INT offset = INTVAL (off);
19451 /* We need a secondary reload when our legitimate_address_p
19452 says the address is good (as otherwise the entire address
19453 will be reloaded), and the offset is not a multiple of
19454 four or we have an address wrap. Address wrap will only
19455 occur for LO_SUMs since legitimate_offset_address_p
19456 rejects addresses for 16-byte mems that will wrap. */
19457 if (GET_CODE (addr) == LO_SUM
19458 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19459 && ((offset & 3) != 0
19460 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19461 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19462 && (offset & 3) != 0))
19464 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19465 if (in_p)
19466 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19467 : CODE_FOR_reload_di_load);
19468 else
19469 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19470 : CODE_FOR_reload_di_store);
19471 sri->extra_cost = 2;
19472 ret = NO_REGS;
19473 done_p = true;
19475 else
19476 default_p = true;
19478 else
19479 default_p = true;
19482 if (!done_p && !TARGET_POWERPC64
19483 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19484 && memory_p
19485 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19487 rtx addr = XEXP (x, 0);
19488 rtx off = address_offset (addr);
19490 if (off != NULL_RTX)
19492 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19493 unsigned HOST_WIDE_INT offset = INTVAL (off);
19495 /* We need a secondary reload when our legitimate_address_p
19496 says the address is good (as otherwise the entire address
19497 will be reloaded), and we have a wrap.
19499 legitimate_lo_sum_address_p allows LO_SUM addresses to
19500 have any offset so test for wrap in the low 16 bits.
19502 legitimate_offset_address_p checks for the range
19503 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19504 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19505 [0x7ff4,0x7fff] respectively, so test for the
19506 intersection of these ranges, [0x7ffc,0x7fff] and
19507 [0x7ff4,0x7ff7] respectively.
19509 Note that the address we see here may have been
19510 manipulated by legitimize_reload_address. */
19511 if (GET_CODE (addr) == LO_SUM
19512 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19513 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19515 if (in_p)
19516 sri->icode = CODE_FOR_reload_si_load;
19517 else
19518 sri->icode = CODE_FOR_reload_si_store;
19519 sri->extra_cost = 2;
19520 ret = NO_REGS;
19521 done_p = true;
19523 else
19524 default_p = true;
19526 else
19527 default_p = true;
19530 if (!done_p)
19531 default_p = true;
19533 if (default_p)
19534 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19536 gcc_assert (ret != ALL_REGS);
19538 if (TARGET_DEBUG_ADDR)
19540 fprintf (stderr,
19541 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19542 "mode = %s",
19543 reg_class_names[ret],
19544 in_p ? "true" : "false",
19545 reg_class_names[rclass],
19546 GET_MODE_NAME (mode));
19548 if (reload_completed)
19549 fputs (", after reload", stderr);
19551 if (!done_p)
19552 fputs (", done_p not set", stderr);
19554 if (default_p)
19555 fputs (", default secondary reload", stderr);
19557 if (sri->icode != CODE_FOR_nothing)
19558 fprintf (stderr, ", reload func = %s, extra cost = %d",
19559 insn_data[sri->icode].name, sri->extra_cost);
19561 else if (sri->extra_cost > 0)
19562 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19564 fputs ("\n", stderr);
19565 debug_rtx (x);
19568 return ret;
19571 /* Better tracing for rs6000_secondary_reload_inner. */
19573 static void
19574 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19575 bool store_p)
19577 rtx set, clobber;
19579 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19581 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19582 store_p ? "store" : "load");
19584 if (store_p)
19585 set = gen_rtx_SET (mem, reg);
19586 else
19587 set = gen_rtx_SET (reg, mem);
19589 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19590 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19593 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19594 ATTRIBUTE_NORETURN;
19596 static void
19597 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19598 bool store_p)
19600 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19601 gcc_unreachable ();
19604 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19605 reload helper functions. These were identified in
19606 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19607 reload, it calls the insns:
19608 reload_<RELOAD:mode>_<P:mptrsize>_store
19609 reload_<RELOAD:mode>_<P:mptrsize>_load
19611 which in turn calls this function, to do whatever is necessary to create
19612 valid addresses. */
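/* E.g. (illustrative): for a V4SImode mem at (and (plus r9 r10) -16)
   being reloaded into a class without RELOAD_REG_AND_M16, the AND case
   below copies r9 + r10 into the scratch register, ANDs it there with
   -16 (with a clobber of a scratch CC register), and uses the scratch
   as the new address.  */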
19614 void
19615 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19617 int regno = true_regnum (reg);
19618 machine_mode mode = GET_MODE (reg);
19619 addr_mask_type addr_mask;
19620 rtx addr;
19621 rtx new_addr;
19622 rtx op_reg, op0, op1;
19623 rtx and_op;
19624 rtx cc_clobber;
19625 rtvec rv;
19627 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19628 || !base_reg_operand (scratch, GET_MODE (scratch)))
19629 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19631 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19632 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19634 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19635 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19637 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19638 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19640 else
19641 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19643 /* Make sure the mode is valid in this register class. */
19644 if ((addr_mask & RELOAD_REG_VALID) == 0)
19645 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19647 if (TARGET_DEBUG_ADDR)
19648 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19650 new_addr = addr = XEXP (mem, 0);
19651 switch (GET_CODE (addr))
19653 /* Does the register class support auto update forms for this mode? If
19654 not, do the update now. We don't need a scratch register, since the
19655 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19656 case PRE_INC:
19657 case PRE_DEC:
19658 op_reg = XEXP (addr, 0);
19659 if (!base_reg_operand (op_reg, Pmode))
19660 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19662 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19664 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_CODE (addr) == PRE_DEC ? -GET_MODE_SIZE (mode) : GET_MODE_SIZE (mode))));
19665 new_addr = op_reg;
19667 break;
19669 case PRE_MODIFY:
19670 op0 = XEXP (addr, 0);
19671 op1 = XEXP (addr, 1);
19672 if (!base_reg_operand (op0, Pmode)
19673 || GET_CODE (op1) != PLUS
19674 || !rtx_equal_p (op0, XEXP (op1, 0)))
19675 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19677 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19679 emit_insn (gen_rtx_SET (op0, op1));
19680 new_addr = op0;
19682 break;
19684 /* Do we need to simulate AND -16 to clear the bottom address bits used
19685 in VMX load/stores? */
19686 case AND:
19687 op0 = XEXP (addr, 0);
19688 op1 = XEXP (addr, 1);
19689 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19691 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19692 op_reg = op0;
19694 else if (GET_CODE (op0) == PLUS)
19696 emit_insn (gen_rtx_SET (scratch, op0));
19697 op_reg = scratch;
19700 else
19701 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19703 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19704 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19705 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19706 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19707 new_addr = scratch;
19709 break;
19711 /* If this is an indirect address, make sure it is a base register. */
19712 case REG:
19713 case SUBREG:
19714 if (!base_reg_operand (addr, GET_MODE (addr)))
19716 emit_insn (gen_rtx_SET (scratch, addr));
19717 new_addr = scratch;
19719 break;
19721 /* If this is an indexed address, make sure the register class can handle
19722 indexed addresses for this mode. */
19723 case PLUS:
19724 op0 = XEXP (addr, 0);
19725 op1 = XEXP (addr, 1);
19726 if (!base_reg_operand (op0, Pmode))
19727 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19729 else if (int_reg_operand (op1, Pmode))
19731 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19733 emit_insn (gen_rtx_SET (scratch, addr));
19734 new_addr = scratch;
19738 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
19740 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19741 || !quad_address_p (addr, mode, false))
19743 emit_insn (gen_rtx_SET (scratch, addr));
19744 new_addr = scratch;
19748 /* Make sure the register class can handle offset addresses. */
19749 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19751 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19753 emit_insn (gen_rtx_SET (scratch, addr));
19754 new_addr = scratch;
19758 else
19759 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19761 break;
19763 case LO_SUM:
19764 op0 = XEXP (addr, 0);
19765 op1 = XEXP (addr, 1);
19766 if (!base_reg_operand (op0, Pmode))
19767 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19769 else if (int_reg_operand (op1, Pmode))
19771 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19773 emit_insn (gen_rtx_SET (scratch, addr));
19774 new_addr = scratch;
19778 /* Quad offsets are restricted and can't handle normal addresses. */
19779 else if (mode_supports_vsx_dform_quad (mode))
19781 emit_insn (gen_rtx_SET (scratch, addr));
19782 new_addr = scratch;
19785 /* Make sure the register class can handle offset addresses. */
19786 else if (legitimate_lo_sum_address_p (mode, addr, false))
19788 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19790 emit_insn (gen_rtx_SET (scratch, addr));
19791 new_addr = scratch;
19795 else
19796 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19798 break;
19800 case SYMBOL_REF:
19801 case CONST:
19802 case LABEL_REF:
19803 rs6000_emit_move (scratch, addr, Pmode);
19804 new_addr = scratch;
19805 break;
19807 default:
19808 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19811 /* Adjust the address if it changed. */
19812 if (addr != new_addr)
19814 mem = replace_equiv_address_nv (mem, new_addr);
19815 if (TARGET_DEBUG_ADDR)
19816 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19819 /* Now create the move. */
19820 if (store_p)
19821 emit_insn (gen_rtx_SET (mem, reg));
19822 else
19823 emit_insn (gen_rtx_SET (reg, mem));
19825 return;
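/* Illustrative sketch (hypothetical operands, not taken from a real
   dump): reloading (mem:V4SI (pre_inc (reg:DI 9))) into an Altivec
   register takes the PRE_INC arm above.  The VMX class has no
   auto-update forms, so the update is done by hand and the access goes
   indirect:
	addi 9,9,16		# simulate the pre-increment
	lvx 2,0,9		# reg+reg form, the only one lvx supports
   The other arms normalize PRE_MODIFY, AND -16, PLUS, LO_SUM and
   symbolic addresses the same way, copying into SCRATCH when needed.  */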
19828 /* Convert reloads involving 64-bit gprs and misaligned offset
19829 addressing, or multiple 32-bit gprs and offsets that are too large,
19830 to use indirect addressing. */
19832 void
19833 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19835 int regno = true_regnum (reg);
19836 enum reg_class rclass;
19837 rtx addr;
19838 rtx scratch_or_premodify = scratch;
19840 if (TARGET_DEBUG_ADDR)
19842 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19843 store_p ? "store" : "load");
19844 fprintf (stderr, "reg:\n");
19845 debug_rtx (reg);
19846 fprintf (stderr, "mem:\n");
19847 debug_rtx (mem);
19848 fprintf (stderr, "scratch:\n");
19849 debug_rtx (scratch);
19852 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19853 gcc_assert (GET_CODE (mem) == MEM);
19854 rclass = REGNO_REG_CLASS (regno);
19855 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19856 addr = XEXP (mem, 0);
19858 if (GET_CODE (addr) == PRE_MODIFY)
19860 gcc_assert (REG_P (XEXP (addr, 0))
19861 && GET_CODE (XEXP (addr, 1)) == PLUS
19862 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19863 scratch_or_premodify = XEXP (addr, 0);
19864 if (!HARD_REGISTER_P (scratch_or_premodify))
19865 /* If we have a pseudo here then reload will have arranged
19866 to have it replaced, but only in the original insn.
19867 Use the replacement here too. */
19868 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19870 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19871 expressions from the original insn, without unsharing them.
19872 Any RTL that points into the original insn will of course
19873 have register replacements applied. That is why we don't
19874 need to look for replacements under the PLUS. */
19875 addr = XEXP (addr, 1);
19877 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19879 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19881 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19883 /* Now create the move. */
19884 if (store_p)
19885 emit_insn (gen_rtx_SET (mem, reg));
19886 else
19887 emit_insn (gen_rtx_SET (reg, mem));
19889 return;
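/* Sketch of the transformation (hypothetical operands): a 64-bit load
   from (mem:DI (plus:DI (reg:DI 9) (const_int 5))) cannot use the
   DS-form "ld", whose displacement must be a multiple of 4, so the
   whole PLUS is moved into the scratch register and the access goes
   indirect:
	addi 11,9,5
	ld 3,0(11)  */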
19892 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
19893 this function has any SDmode references. If we are on a power7 or later, we
19894 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
19895 can load/store the value. */
19897 static void
19898 rs6000_alloc_sdmode_stack_slot (void)
19900 tree t;
19901 basic_block bb;
19902 gimple_stmt_iterator gsi;
19904 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
19905 /* We use a different approach for dealing with the secondary
19906 memory in LRA. */
19907 if (ira_use_lra_p)
19908 return;
19910 if (TARGET_NO_SDMODE_STACK)
19911 return;
19913 FOR_EACH_BB_FN (bb, cfun)
19914 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
19916 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
19917 if (ret)
19919 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
19920 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
19921 SDmode, 0);
19922 return;
19926 /* Check for any SDmode parameters of the function. */
19927 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
19929 if (TREE_TYPE (t) == error_mark_node)
19930 continue;
19932 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
19933 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
19935 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
19936 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
19937 SDmode, 0);
19938 return;
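/* Note for illustration: the slot is allocated in DDmode (8 bytes) even
   though only an SDmode (4 byte) view of it is recorded.  Without
   LFIWZX/STFIWX the FPR side can only move such values with the 8-byte
   lfd/stfd instructions, so GPR<->FPR copies of SDmode values bounce
   through this doubleword-sized, doubleword-aligned temporary.  */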
19943 static void
19944 rs6000_instantiate_decls (void)
19946 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
19947 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
19950 /* Given an rtx X being reloaded into a reg required to be
19951 in class CLASS, return the class of reg to actually use.
19952 In general this is just CLASS; but on some machines
19953 in some cases it is preferable to use a more restrictive class.
19955 On the RS/6000, we have to return NO_REGS when we want to reload a
19956 floating-point CONST_DOUBLE to force it to be copied to memory.
19958 We also don't want to reload integer values into floating-point
19959 registers if we can at all help it. In fact, this can
19960 cause reload to die, if it tries to generate a reload of CTR
19961 into a FP register and discovers it doesn't have the memory location
19962 required.
19964 ??? Would it be a good idea to have reload do the converse, that is
19965 try to reload floating modes into FP registers if possible?
19968 static enum reg_class
19969 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19971 machine_mode mode = GET_MODE (x);
19972 bool is_constant = CONSTANT_P (x);
19974 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19975 reload class for it. */
19976 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19977 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19978 return NO_REGS;
19980 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19981 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19982 return NO_REGS;
19984 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19985 the reloading of address expressions using PLUS into floating point
19986 registers. */
19987 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19989 if (is_constant)
19991 /* Zero is always allowed in all VSX registers. */
19992 if (x == CONST0_RTX (mode))
19993 return rclass;
19995 /* If this is a vector constant that can be formed with a few Altivec
19996 instructions, we want altivec registers. */
19997 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19998 return ALTIVEC_REGS;
20000 /* Force constant to memory. */
20001 return NO_REGS;
20004 /* D-form addressing can easily reload the value. */
20005 if (mode_supports_vmx_dform (mode)
20006 || mode_supports_vsx_dform_quad (mode))
20007 return rclass;
20009 /* If this is a scalar floating point value and we don't have D-form
20010 addressing, prefer the traditional floating point registers so that we
20011 can use D-form (register+offset) addressing. */
20012 if (GET_MODE_SIZE (mode) < 16 && rclass == VSX_REGS)
20013 return FLOAT_REGS;
20015 /* Prefer the Altivec registers if Altivec is handling the vector
20016 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20017 loads. */
20018 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20019 || mode == V1TImode)
20020 return ALTIVEC_REGS;
20022 return rclass;
20025 if (is_constant || GET_CODE (x) == PLUS)
20027 if (reg_class_subset_p (GENERAL_REGS, rclass))
20028 return GENERAL_REGS;
20029 if (reg_class_subset_p (BASE_REGS, rclass))
20030 return BASE_REGS;
20031 return NO_REGS;
20034 /* If we haven't picked a register class, and the type is a vector or
20035 floating point type, prefer to use the VSX, FPR, or Altivec register
20036 classes. */
20037 if (rclass == NO_REGS)
20039 if (TARGET_VSX && VECTOR_MEM_VSX_OR_P8_VECTOR_P (mode))
20040 return VSX_REGS;
20042 if (TARGET_ALTIVEC && VECTOR_MEM_ALTIVEC_P (mode))
20043 return ALTIVEC_REGS;
20045 if (DECIMAL_FLOAT_MODE_P (mode))
20046 return TARGET_DFP ? FLOAT_REGS : NO_REGS;
20048 if (TARGET_FPRS && TARGET_HARD_FLOAT && FLOAT_MODE_P (mode)
20049 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) != 0)
20050 return FLOAT_REGS;
20053 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20054 return GENERAL_REGS;
20056 return rclass;
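/* Example of the preferences above (hypothetical): an SFmode pseudo
   with rclass == VSX_REGS and no D-form support is narrowed to
   FLOAT_REGS so that "lfs f,disp(rb)" style addressing stays available;
   a zero CONST_VECTOR keeps the full VSX class, since xxlxor can
   generate zero in any VSX register; other constants get NO_REGS and
   are forced to memory.  */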
20059 /* Debug version of rs6000_preferred_reload_class. */
20060 static enum reg_class
20061 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20063 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20065 fprintf (stderr,
20066 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20067 "mode = %s, x:\n",
20068 reg_class_names[ret], reg_class_names[rclass],
20069 GET_MODE_NAME (GET_MODE (x)));
20070 debug_rtx (x);
20072 return ret;
20075 /* If we are copying between FP or AltiVec registers and anything else, we need
20076 a memory location. The exception is when we are targeting ppc64 and the
20077 move to/from fpr to gpr instructions are available. Also, under VSX, you
20078 can copy vector registers from the FP register set to the Altivec register
20079 set and vice versa. */
20081 static bool
20082 rs6000_secondary_memory_needed (enum reg_class from_class,
20083 enum reg_class to_class,
20084 machine_mode mode)
20086 enum rs6000_reg_type from_type, to_type;
20087 bool altivec_p = ((from_class == ALTIVEC_REGS)
20088 || (to_class == ALTIVEC_REGS));
20090 /* If a simple/direct move is available, we don't need secondary memory */
20091 from_type = reg_class_to_reg_type[(int)from_class];
20092 to_type = reg_class_to_reg_type[(int)to_class];
20094 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20095 (secondary_reload_info *)0, altivec_p))
20096 return false;
20098 /* If we have a floating point or vector register class, we need to use
20099 memory to transfer the data. */
20100 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20101 return true;
20103 return false;
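/* Concrete case (hypothetical registers): a DFmode copy between a GPR
   and an FPR returns true here on pre-power8 targets, forcing a stack
   temporary ("std 9,disp(1)" then "lfd 1,disp(1)" or the reverse).
   With TARGET_DIRECT_MOVE the same copy is a single mtvsrd/mfvsrd, so
   rs6000_secondary_reload_move succeeds and no memory is needed.  */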
20106 /* Debug version of rs6000_secondary_memory_needed. */
20107 static bool
20108 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20109 enum reg_class to_class,
20110 machine_mode mode)
20112 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20114 fprintf (stderr,
20115 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20116 "to_class = %s, mode = %s\n",
20117 ret ? "true" : "false",
20118 reg_class_names[from_class],
20119 reg_class_names[to_class],
20120 GET_MODE_NAME (mode));
20122 return ret;
20125 /* Return the register class of a scratch register needed to copy IN into
20126 or out of a register in RCLASS in MODE. If it can be done directly,
20127 NO_REGS is returned. */
20129 static enum reg_class
20130 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20131 rtx in)
20133 int regno;
20135 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20136 #if TARGET_MACHO
20137 && MACHOPIC_INDIRECT
20138 #endif
20141 /* We cannot copy a symbolic operand directly into anything
20142 other than BASE_REGS for TARGET_ELF. So indicate that a
20143 register from BASE_REGS is needed as an intermediate
20144 register.
20146 On Darwin, pic addresses require a load from memory, which
20147 needs a base register. */
20148 if (rclass != BASE_REGS
20149 && (GET_CODE (in) == SYMBOL_REF
20150 || GET_CODE (in) == HIGH
20151 || GET_CODE (in) == LABEL_REF
20152 || GET_CODE (in) == CONST))
20153 return BASE_REGS;
20156 if (GET_CODE (in) == REG)
20158 regno = REGNO (in);
20159 if (regno >= FIRST_PSEUDO_REGISTER)
20161 regno = true_regnum (in);
20162 if (regno >= FIRST_PSEUDO_REGISTER)
20163 regno = -1;
20166 else if (GET_CODE (in) == SUBREG)
20168 regno = true_regnum (in);
20169 if (regno >= FIRST_PSEUDO_REGISTER)
20170 regno = -1;
20172 else
20173 regno = -1;
20175 /* If we have VSX register moves, prefer moving scalar values between
20176 Altivec registers and GPR by going via an FPR (and then via memory)
20177 instead of reloading the secondary memory address for Altivec moves. */
20178 if (TARGET_VSX
20179 && GET_MODE_SIZE (mode) < 16
20180 && !mode_supports_vmx_dform (mode)
20181 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20182 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20183 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20184 && (regno >= 0 && INT_REGNO_P (regno)))))
20185 return FLOAT_REGS;
20187 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20188 into anything. */
20189 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20190 || (regno >= 0 && INT_REGNO_P (regno)))
20191 return NO_REGS;
20193 /* Constants, memory, and VSX registers can go into VSX registers (both the
20194 traditional floating point and the altivec registers). */
20195 if (rclass == VSX_REGS
20196 && (regno == -1 || VSX_REGNO_P (regno)))
20197 return NO_REGS;
20199 /* Constants, memory, and FP registers can go into FP registers. */
20200 if ((regno == -1 || FP_REGNO_P (regno))
20201 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20202 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20204 /* Memory, and AltiVec registers can go into AltiVec registers. */
20205 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20206 && rclass == ALTIVEC_REGS)
20207 return NO_REGS;
20209 /* We can copy among the CR registers. */
20210 if ((rclass == CR_REGS || rclass == CR0_REGS)
20211 && regno >= 0 && CR_REGNO_P (regno))
20212 return NO_REGS;
20214 /* Otherwise, we need GENERAL_REGS. */
20215 return GENERAL_REGS;
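/* Worked example (hypothetical): on ELF, copying a symbolic address
   into a non-base register returns BASE_REGS above, so the address is
   first built in a GPR (e.g. "lis 9,sym@ha; addi 9,9,sym@l" for small
   model code) and only then moved on.  Copies that need no
   intermediate, such as VSX<->VSX or CR<->CR, return NO_REGS.  */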
20218 /* Debug version of rs6000_secondary_reload_class. */
20219 static enum reg_class
20220 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20221 machine_mode mode, rtx in)
20223 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20224 fprintf (stderr,
20225 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20226 "mode = %s, input rtx:\n",
20227 reg_class_names[ret], reg_class_names[rclass],
20228 GET_MODE_NAME (mode));
20229 debug_rtx (in);
20231 return ret;
20234 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20236 static bool
20237 rs6000_cannot_change_mode_class (machine_mode from,
20238 machine_mode to,
20239 enum reg_class rclass)
20241 unsigned from_size = GET_MODE_SIZE (from);
20242 unsigned to_size = GET_MODE_SIZE (to);
20244 if (from_size != to_size)
20246 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20248 if (reg_classes_intersect_p (xclass, rclass))
20250 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20251 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20252 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20253 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20255 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20256 single register under VSX because the scalar part of the register
20257 is in the upper 64-bits, and not the lower 64-bits. Types like
20258 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20259 IEEE floating point can't overlap, and neither can small
20260 values. */
20262 if (to_float128_vector_p && from_float128_vector_p)
20263 return false;
20265 else if (to_float128_vector_p || from_float128_vector_p)
20266 return true;
20268 /* TDmode in floating-mode registers must always go into a register
20269 pair with the most significant word in the even-numbered register
20270 to match ISA requirements. In little-endian mode, this does not
20271 match subreg numbering, so we cannot allow subregs. */
20272 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20273 return true;
20275 if (from_size < 8 || to_size < 8)
20276 return true;
20278 if (from_size == 8 && (8 * to_nregs) != to_size)
20279 return true;
20281 if (to_size == 8 && (8 * from_nregs) != from_size)
20282 return true;
20284 return false;
20286 else
20287 return false;
20290 if (TARGET_E500_DOUBLE
20291 && ((((to) == DFmode) + ((from) == DFmode)) == 1
20292 || (((to) == TFmode) + ((from) == TFmode)) == 1
20293 || (((to) == IFmode) + ((from) == IFmode)) == 1
20294 || (((to) == KFmode) + ((from) == KFmode)) == 1
20295 || (((to) == DDmode) + ((from) == DDmode)) == 1
20296 || (((to) == TDmode) + ((from) == TDmode)) == 1
20297 || (((to) == DImode) + ((from) == DImode)) == 1))
20298 return true;
20300 /* Since the VSX register set includes traditional floating point registers
20301 and altivec registers, just check for the size being different instead of
20302 trying to check whether the modes are vector modes. Otherwise it won't
20303 allow say DF and DI to change classes. For types like TFmode and TDmode
20304 that take 2 64-bit registers, rather than a single 128-bit register, don't
20305 allow subregs of those types to other 128 bit types. */
20306 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20308 unsigned num_regs = (from_size + 15) / 16;
20309 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20310 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20311 return true;
20313 return (from_size != 8 && from_size != 16);
20316 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20317 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20318 return true;
20320 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
20321 && reg_classes_intersect_p (GENERAL_REGS, rclass))
20322 return true;
20324 return false;
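/* Example of the VSX size checks above: (subreg:DI (reg:TF ...)) is
   allowed because IBM long double occupies two 8-byte registers, while
   (subreg:DI (reg:KF ...)) is rejected -- an IEEE 128-bit value sits in
   a single vector register whose scalar half is the *upper* 64 bits, so
   a word-sized subreg would name the wrong part of the register.  */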
20327 /* Debug version of rs6000_cannot_change_mode_class. */
20328 static bool
20329 rs6000_debug_cannot_change_mode_class (machine_mode from,
20330 machine_mode to,
20331 enum reg_class rclass)
20333 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20335 fprintf (stderr,
20336 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20337 "to = %s, rclass = %s\n",
20338 ret ? "true" : "false",
20339 GET_MODE_NAME (from), GET_MODE_NAME (to),
20340 reg_class_names[rclass]);
20342 return ret;
20345 /* Return a string to do a move operation of 128 bits of data. */
20347 const char *
20348 rs6000_output_move_128bit (rtx operands[])
20350 rtx dest = operands[0];
20351 rtx src = operands[1];
20352 machine_mode mode = GET_MODE (dest);
20353 int dest_regno;
20354 int src_regno;
20355 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20356 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20358 if (REG_P (dest))
20360 dest_regno = REGNO (dest);
20361 dest_gpr_p = INT_REGNO_P (dest_regno);
20362 dest_fp_p = FP_REGNO_P (dest_regno);
20363 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20364 dest_vsx_p = dest_fp_p | dest_vmx_p;
20366 else
20368 dest_regno = -1;
20369 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20372 if (REG_P (src))
20374 src_regno = REGNO (src);
20375 src_gpr_p = INT_REGNO_P (src_regno);
20376 src_fp_p = FP_REGNO_P (src_regno);
20377 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20378 src_vsx_p = src_fp_p | src_vmx_p;
20380 else
20382 src_regno = -1;
20383 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20386 /* Register moves. */
20387 if (dest_regno >= 0 && src_regno >= 0)
20389 if (dest_gpr_p)
20391 if (src_gpr_p)
20392 return "#";
20394 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20395 return (WORDS_BIG_ENDIAN
20396 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20397 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20399 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20400 return "#";
20403 else if (TARGET_VSX && dest_vsx_p)
20405 if (src_vsx_p)
20406 return "xxlor %x0,%x1,%x1";
20408 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20409 return (WORDS_BIG_ENDIAN
20410 ? "mtvsrdd %x0,%1,%L1"
20411 : "mtvsrdd %x0,%L1,%1");
20413 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20414 return "#";
20417 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20418 return "vor %0,%1,%1";
20420 else if (dest_fp_p && src_fp_p)
20421 return "#";
20424 /* Loads. */
20425 else if (dest_regno >= 0 && MEM_P (src))
20427 if (dest_gpr_p)
20429 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20430 return "lq %0,%1";
20431 else
20432 return "#";
20435 else if (TARGET_ALTIVEC && dest_vmx_p
20436 && altivec_indexed_or_indirect_operand (src, mode))
20437 return "lvx %0,%y1";
20439 else if (TARGET_VSX && dest_vsx_p)
20441 if (mode_supports_vsx_dform_quad (mode)
20442 && quad_address_p (XEXP (src, 0), mode, true))
20443 return "lxv %x0,%1";
20445 else if (TARGET_P9_VECTOR)
20446 return "lxvx %x0,%y1";
20448 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20449 return "lxvw4x %x0,%y1";
20451 else
20452 return "lxvd2x %x0,%y1";
20455 else if (TARGET_ALTIVEC && dest_vmx_p)
20456 return "lvx %0,%y1";
20458 else if (dest_fp_p)
20459 return "#";
20462 /* Stores. */
20463 else if (src_regno >= 0 && MEM_P (dest))
20465 if (src_gpr_p)
20467 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20468 return "stq %1,%0";
20469 else
20470 return "#";
20473 else if (TARGET_ALTIVEC && src_vmx_p
20474 && altivec_indexed_or_indirect_operand (dest, mode))
20475 return "stvx %1,%y0";
20477 else if (TARGET_VSX && src_vsx_p)
20479 if (mode_supports_vsx_dform_quad (mode)
20480 && quad_address_p (XEXP (dest, 0), mode, true))
20481 return "stxv %x1,%0";
20483 else if (TARGET_P9_VECTOR)
20484 return "stxvx %x1,%y0";
20486 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20487 return "stxvw4x %x1,%y0";
20489 else
20490 return "stxvd2x %x1,%y0";
20493 else if (TARGET_ALTIVEC && src_vmx_p)
20494 return "stvx %1,%y0";
20496 else if (src_fp_p)
20497 return "#";
20500 /* Constants. */
20501 else if (dest_regno >= 0
20502 && (GET_CODE (src) == CONST_INT
20503 || GET_CODE (src) == CONST_WIDE_INT
20504 || GET_CODE (src) == CONST_DOUBLE
20505 || GET_CODE (src) == CONST_VECTOR))
20507 if (dest_gpr_p)
20508 return "#";
20510 else if ((dest_vmx_p && TARGET_ALTIVEC)
20511 || (dest_vsx_p && TARGET_VSX))
20512 return output_vec_const_move (operands);
20515 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
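/* For instance (hypothetical operands): a TImode copy between two GPR
   pairs returns "#", leaving the post-reload splitter to emit two
   doubleword moves, while a V2DF copy between VSX registers comes out
   directly as "xxlor %x0,%x1,%x1" and an aligned Altivec load as
   "lvx %0,%y1".  */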
20518 /* Validate a 128-bit move. */
20519 bool
20520 rs6000_move_128bit_ok_p (rtx operands[])
20522 machine_mode mode = GET_MODE (operands[0]);
20523 return (gpc_reg_operand (operands[0], mode)
20524 || gpc_reg_operand (operands[1], mode));
20527 /* Return true if a 128-bit move needs to be split. */
20528 bool
20529 rs6000_split_128bit_ok_p (rtx operands[])
20531 if (!reload_completed)
20532 return false;
20534 if (!gpr_or_gpr_p (operands[0], operands[1]))
20535 return false;
20537 if (quad_load_store_p (operands[0], operands[1]))
20538 return false;
20540 return true;
20544 /* Given a comparison operation, return the bit number in CCR to test. We
20545 know this is a valid comparison.
20547 SCC_P is 1 if this is for an scc. That means that %D will have been
20548 used instead of %C, so the bits will be in different places.
20550 Return -1 if OP isn't a valid comparison for some reason. */
20552 int
20553 ccr_bit (rtx op, int scc_p)
20555 enum rtx_code code = GET_CODE (op);
20556 machine_mode cc_mode;
20557 int cc_regnum;
20558 int base_bit;
20559 rtx reg;
20561 if (!COMPARISON_P (op))
20562 return -1;
20564 reg = XEXP (op, 0);
20566 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20568 cc_mode = GET_MODE (reg);
20569 cc_regnum = REGNO (reg);
20570 base_bit = 4 * (cc_regnum - CR0_REGNO);
20572 validate_condition_mode (code, cc_mode);
20574 /* When generating a sCOND operation, only positive conditions are
20575 allowed. */
20576 gcc_assert (!scc_p
20577 || code == EQ || code == GT || code == LT || code == UNORDERED
20578 || code == GTU || code == LTU);
20580 switch (code)
20582 case NE:
20583 return scc_p ? base_bit + 3 : base_bit + 2;
20584 case EQ:
20585 return base_bit + 2;
20586 case GT: case GTU: case UNLE:
20587 return base_bit + 1;
20588 case LT: case LTU: case UNGE:
20589 return base_bit;
20590 case ORDERED: case UNORDERED:
20591 return base_bit + 3;
20593 case GE: case GEU:
20594 /* If scc, we will have done a cror to put the bit in the
20595 unordered position. So test that bit. For integer, this is ! LT
20596 unless this is an scc insn. */
20597 return scc_p ? base_bit + 3 : base_bit;
20599 case LE: case LEU:
20600 return scc_p ? base_bit + 3 : base_bit + 1;
20602 default:
20603 gcc_unreachable ();
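/* Example: a GT test in field cr1 has base_bit == 4, so ccr_bit returns
   5 (base_bit + 1, the GT bit of that field).  For an scc sequence the
   GE/GEU cases return base_bit + 3 instead, because a cror has already
   copied the result into the "unordered" bit of the field.  */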
20607 /* Return the GOT register. */
20609 rtx
20610 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20612 /* The second flow pass currently (June 1999) can't update
20613 regs_ever_live without disturbing other parts of the compiler, so
20614 update it here to make the prolog/epilogue code happy. */
20615 if (!can_create_pseudo_p ()
20616 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20617 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20619 crtl->uses_pic_offset_table = 1;
20621 return pic_offset_table_rtx;
20624 static rs6000_stack_t stack_info;
20626 /* Function to init struct machine_function.
20627 This will be called, via a pointer variable,
20628 from push_function_context. */
20630 static struct machine_function *
20631 rs6000_init_machine_status (void)
20633 stack_info.reload_completed = 0;
20634 return ggc_cleared_alloc<machine_function> ();
20637 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20639 /* Write out a function code label. */
20641 void
20642 rs6000_output_function_entry (FILE *file, const char *fname)
20644 if (fname[0] != '.')
20646 switch (DEFAULT_ABI)
20648 default:
20649 gcc_unreachable ();
20651 case ABI_AIX:
20652 if (DOT_SYMBOLS)
20653 putc ('.', file);
20654 else
20655 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20656 break;
20658 case ABI_ELFv2:
20659 case ABI_V4:
20660 case ABI_DARWIN:
20661 break;
20665 RS6000_OUTPUT_BASENAME (file, fname);
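/* E.g. under the AIX ABI with DOT_SYMBOLS, the entry point of "foo" is
   labeled ".foo" (plain "foo" names the function descriptor), while
   ELFv2, V4 and Darwin print the name unchanged.  */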
20668 /* Print an operand. Recognize special options, documented below. */
20670 #if TARGET_ELF
20671 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20672 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20673 #else
20674 #define SMALL_DATA_RELOC "sda21"
20675 #define SMALL_DATA_REG 0
20676 #endif
20678 void
20679 print_operand (FILE *file, rtx x, int code)
20681 int i;
20682 unsigned HOST_WIDE_INT uval;
20684 switch (code)
20686 /* %a is output_address. */
20688 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20689 output_operand. */
20691 case 'D':
20692 /* Like 'J' but get to the GT bit only. */
20693 gcc_assert (REG_P (x));
20695 /* Bit 1 is GT bit. */
20696 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20698 /* Add one for shift count in rlinm for scc. */
20699 fprintf (file, "%d", i + 1);
20700 return;
20702 case 'e':
20703 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20704 if (! INT_P (x))
20706 output_operand_lossage ("invalid %%e value");
20707 return;
20710 uval = INTVAL (x);
20711 if ((uval & 0xffff) == 0 && uval != 0)
20712 putc ('s', file);
20713 return;
20715 case 'E':
20716 /* X is a CR register. Print the number of the EQ bit of the CR */
20717 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20718 output_operand_lossage ("invalid %%E value");
20719 else
20720 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20721 return;
20723 case 'f':
20724 /* X is a CR register. Print the shift count needed to move it
20725 to the high-order four bits. */
20726 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20727 output_operand_lossage ("invalid %%f value");
20728 else
20729 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20730 return;
20732 case 'F':
20733 /* Similar, but print the count for the rotate in the opposite
20734 direction. */
20735 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20736 output_operand_lossage ("invalid %%F value");
20737 else
20738 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20739 return;
20741 case 'G':
20742 /* X is a constant integer. If it is negative, print "m",
20743 otherwise print "z". This is to make an aze or ame insn. */
20744 if (GET_CODE (x) != CONST_INT)
20745 output_operand_lossage ("invalid %%G value");
20746 else if (INTVAL (x) >= 0)
20747 putc ('z', file);
20748 else
20749 putc ('m', file);
20750 return;
20752 case 'h':
20753 /* If constant, output low-order five bits. Otherwise, write
20754 normally. */
20755 if (INT_P (x))
20756 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20757 else
20758 print_operand (file, x, 0);
20759 return;
20761 case 'H':
20762 /* If constant, output low-order six bits. Otherwise, write
20763 normally. */
20764 if (INT_P (x))
20765 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20766 else
20767 print_operand (file, x, 0);
20768 return;
20770 case 'I':
20771 /* Print `i' if this is a constant, else nothing. */
20772 if (INT_P (x))
20773 putc ('i', file);
20774 return;
20776 case 'j':
20777 /* Write the bit number in CCR for jump. */
20778 i = ccr_bit (x, 0);
20779 if (i == -1)
20780 output_operand_lossage ("invalid %%j code");
20781 else
20782 fprintf (file, "%d", i);
20783 return;
20785 case 'J':
20786 /* Similar, but add one for shift count in rlinm for scc and pass
20787 scc flag to `ccr_bit'. */
20788 i = ccr_bit (x, 1);
20789 if (i == -1)
20790 output_operand_lossage ("invalid %%J code");
20791 else
20792 /* If we want bit 31, write a shift count of zero, not 32. */
20793 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20794 return;
20796 case 'k':
20797 /* X must be a constant. Write the 1's complement of the
20798 constant. */
20799 if (! INT_P (x))
20800 output_operand_lossage ("invalid %%k value");
20801 else
20802 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20803 return;
20805 case 'K':
20806 /* X must be a symbolic constant on ELF. Write an
20807 expression suitable for an 'addi' that adds in the low 16
20808 bits of the MEM. */
20809 if (GET_CODE (x) == CONST)
20811 if (GET_CODE (XEXP (x, 0)) != PLUS
20812 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20813 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20814 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20815 output_operand_lossage ("invalid %%K value");
20817 print_operand_address (file, x);
20818 fputs ("@l", file);
20819 return;
20821 /* %l is output_asm_label. */
20823 case 'L':
20824 /* Write second word of DImode or DFmode reference. Works on register
20825 or non-indexed memory only. */
20826 if (REG_P (x))
20827 fputs (reg_names[REGNO (x) + 1], file);
20828 else if (MEM_P (x))
20830 machine_mode mode = GET_MODE (x);
20831 /* Handle possible auto-increment. Since it is pre-increment and
20832 we have already done it, we can just use an offset of one word. */
20833 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20834 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20835 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20836 UNITS_PER_WORD));
20837 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20838 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20839 UNITS_PER_WORD));
20840 else
20841 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20842 UNITS_PER_WORD),
20843 0));
20845 if (small_data_operand (x, GET_MODE (x)))
20846 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20847 reg_names[SMALL_DATA_REG]);
20849 return;
20851 case 'N':
20852 /* Write the number of elements in the vector times 4. */
20853 if (GET_CODE (x) != PARALLEL)
20854 output_operand_lossage ("invalid %%N value");
20855 else
20856 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20857 return;
20859 case 'O':
20860 /* Similar, but subtract 1 first. */
20861 if (GET_CODE (x) != PARALLEL)
20862 output_operand_lossage ("invalid %%O value");
20863 else
20864 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20865 return;
20867 case 'p':
20868 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20869 if (! INT_P (x)
20870 || INTVAL (x) < 0
20871 || (i = exact_log2 (INTVAL (x))) < 0)
20872 output_operand_lossage ("invalid %%p value");
20873 else
20874 fprintf (file, "%d", i);
20875 return;
20877 case 'P':
20878 /* The operand must be an indirect memory reference. The result
20879 is the register name. */
20880 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20881 || REGNO (XEXP (x, 0)) >= 32)
20882 output_operand_lossage ("invalid %%P value");
20883 else
20884 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20885 return;
20887 case 'q':
20888 /* This outputs the logical code corresponding to a boolean
20889 expression. The expression may have one or both operands
20890 negated (if one, only the first one). For condition register
20891 logical operations, it will also treat the negated
20892 CR codes as NOTs, but not handle NOTs of them. */
20894 const char *const *t = 0;
20895 const char *s;
20896 enum rtx_code code = GET_CODE (x);
20897 static const char * const tbl[3][3] = {
20898 { "and", "andc", "nor" },
20899 { "or", "orc", "nand" },
20900 { "xor", "eqv", "xor" } };
20902 if (code == AND)
20903 t = tbl[0];
20904 else if (code == IOR)
20905 t = tbl[1];
20906 else if (code == XOR)
20907 t = tbl[2];
20908 else
20909 output_operand_lossage ("invalid %%q value");
20911 if (GET_CODE (XEXP (x, 0)) != NOT)
20912 s = t[0];
20913 else
20915 if (GET_CODE (XEXP (x, 1)) == NOT)
20916 s = t[2];
20917 else
20918 s = t[1];
20921 fputs (s, file);
20923 return;
20925 case 'Q':
20926 if (! TARGET_MFCRF)
20927 return;
20928 fputc (',', file);
20929 /* FALLTHRU */
20931 case 'R':
20932 /* X is a CR register. Print the mask for `mtcrf'. */
20933 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20934 output_operand_lossage ("invalid %%R value");
20935 else
20936 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20937 return;
20939 case 's':
20940 /* Low 5 bits of 32 - value */
20941 if (! INT_P (x))
20942 output_operand_lossage ("invalid %%s value");
20943 else
20944 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20945 return;
20947 case 't':
20948 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20949 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
20951 /* Bit 3 is OV bit. */
20952 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20954 /* If we want bit 31, write a shift count of zero, not 32. */
20955 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20956 return;
20958 case 'T':
20959 /* Print the symbolic name of a branch target register. */
20960 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20961 && REGNO (x) != CTR_REGNO))
20962 output_operand_lossage ("invalid %%T value");
20963 else if (REGNO (x) == LR_REGNO)
20964 fputs ("lr", file);
20965 else
20966 fputs ("ctr", file);
20967 return;
20969 case 'u':
20970 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20971 for use in unsigned operand. */
20972 if (! INT_P (x))
20974 output_operand_lossage ("invalid %%u value");
20975 return;
20978 uval = INTVAL (x);
20979 if ((uval & 0xffff) == 0)
20980 uval >>= 16;
20982 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20983 return;
20985 case 'v':
20986 /* High-order 16 bits of constant for use in signed operand. */
20987 if (! INT_P (x))
20988 output_operand_lossage ("invalid %%v value");
20989 else
20990 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20991 (INTVAL (x) >> 16) & 0xffff);
20992 return;
20994 case 'U':
20995 /* Print `u' if this has an auto-increment or auto-decrement. */
20996 if (MEM_P (x)
20997 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20998 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20999 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21000 putc ('u', file);
21001 return;
21003 case 'V':
21004 /* Print the trap code for this operand. */
21005 switch (GET_CODE (x))
21007 case EQ:
21008 fputs ("eq", file); /* 4 */
21009 break;
21010 case NE:
21011 fputs ("ne", file); /* 24 */
21012 break;
21013 case LT:
21014 fputs ("lt", file); /* 16 */
21015 break;
21016 case LE:
21017 fputs ("le", file); /* 20 */
21018 break;
21019 case GT:
21020 fputs ("gt", file); /* 8 */
21021 break;
21022 case GE:
21023 fputs ("ge", file); /* 12 */
21024 break;
21025 case LTU:
21026 fputs ("llt", file); /* 2 */
21027 break;
21028 case LEU:
21029 fputs ("lle", file); /* 6 */
21030 break;
21031 case GTU:
21032 fputs ("lgt", file); /* 1 */
21033 break;
21034 case GEU:
21035 fputs ("lge", file); /* 5 */
21036 break;
21037 default:
21038 gcc_unreachable ();
21040 break;
21042 case 'w':
21043 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21044 normally. */
21045 if (INT_P (x))
21046 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21047 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21048 else
21049 print_operand (file, x, 0);
21050 return;
21052 case 'x':
21053 /* X is a FPR or Altivec register used in a VSX context. */
21054 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21055 output_operand_lossage ("invalid %%x value");
21056 else
21058 int reg = REGNO (x);
21059 int vsx_reg = (FP_REGNO_P (reg)
21060 ? reg - 32
21061 : reg - FIRST_ALTIVEC_REGNO + 32);
21063 #ifdef TARGET_REGNAMES
21064 if (TARGET_REGNAMES)
21065 fprintf (file, "%%vs%d", vsx_reg);
21066 else
21067 #endif
21068 fprintf (file, "%d", vsx_reg);
21070 return;
21072 case 'X':
21073 if (MEM_P (x)
21074 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21075 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21076 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21077 putc ('x', file);
21078 return;
21080 case 'Y':
21081 /* Like 'L', for third word of TImode/PTImode */
21082 if (REG_P (x))
21083 fputs (reg_names[REGNO (x) + 2], file);
21084 else if (MEM_P (x))
21086 machine_mode mode = GET_MODE (x);
21087 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21088 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21089 output_address (mode, plus_constant (Pmode,
21090 XEXP (XEXP (x, 0), 0), 8));
21091 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21092 output_address (mode, plus_constant (Pmode,
21093 XEXP (XEXP (x, 0), 0), 8));
21094 else
21095 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21096 if (small_data_operand (x, GET_MODE (x)))
21097 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21098 reg_names[SMALL_DATA_REG]);
21100 return;
21102 case 'z':
21103 /* X is a SYMBOL_REF. Write out the name preceded by a
21104 period and without any trailing data in brackets. Used for function
21105 names. If we are configured for System V (or the embedded ABI) on
21106 the PowerPC, do not emit the period, since those systems do not use
21107 TOCs and the like. */
21108 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21110 /* For macho, check to see if we need a stub. */
21111 if (TARGET_MACHO)
21113 const char *name = XSTR (x, 0);
21114 #if TARGET_MACHO
21115 if (darwin_emit_branch_islands
21116 && MACHOPIC_INDIRECT
21117 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21118 name = machopic_indirection_name (x, /*stub_p=*/true);
21119 #endif
21120 assemble_name (file, name);
21122 else if (!DOT_SYMBOLS)
21123 assemble_name (file, XSTR (x, 0));
21124 else
21125 rs6000_output_function_entry (file, XSTR (x, 0));
21126 return;
21128 case 'Z':
21129 /* Like 'L', for last word of TImode/PTImode. */
21130 if (REG_P (x))
21131 fputs (reg_names[REGNO (x) + 3], file);
21132 else if (MEM_P (x))
21134 machine_mode mode = GET_MODE (x);
21135 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21136 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21137 output_address (mode, plus_constant (Pmode,
21138 XEXP (XEXP (x, 0), 0), 12));
21139 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21140 output_address (mode, plus_constant (Pmode,
21141 XEXP (XEXP (x, 0), 0), 12));
21142 else
21143 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21144 if (small_data_operand (x, GET_MODE (x)))
21145 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21146 reg_names[SMALL_DATA_REG]);
21148 return;
21150 /* Print AltiVec or SPE memory operand. */
21151 case 'y':
21153 rtx tmp;
21155 gcc_assert (MEM_P (x));
21157 tmp = XEXP (x, 0);
21159 /* Ugly hack because %y is overloaded. */
21160 if ((TARGET_SPE || TARGET_E500_DOUBLE)
21161 && (GET_MODE_SIZE (GET_MODE (x)) == 8
21162 || FLOAT128_2REG_P (GET_MODE (x))
21163 || GET_MODE (x) == TImode
21164 || GET_MODE (x) == PTImode))
21166 /* Handle [reg]. */
21167 if (REG_P (tmp))
21169 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
21170 break;
21172 /* Handle [reg+UIMM]. */
21173 else if (GET_CODE (tmp) == PLUS &&
21174 GET_CODE (XEXP (tmp, 1)) == CONST_INT)
21176 int x;
21178 gcc_assert (REG_P (XEXP (tmp, 0)));
21180 x = INTVAL (XEXP (tmp, 1));
21181 fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
21182 break;
21185 /* Fall through. Must be [reg+reg]. */
21187 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21188 && GET_CODE (tmp) == AND
21189 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21190 && INTVAL (XEXP (tmp, 1)) == -16)
21191 tmp = XEXP (tmp, 0);
21192 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21193 && GET_CODE (tmp) == PRE_MODIFY)
21194 tmp = XEXP (tmp, 1);
21195 if (REG_P (tmp))
21196 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21197 else
21199 if (GET_CODE (tmp) != PLUS
21200 || !REG_P (XEXP (tmp, 0))
21201 || !REG_P (XEXP (tmp, 1)))
21203 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21204 break;
21207 if (REGNO (XEXP (tmp, 0)) == 0)
21208 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21209 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21210 else
21211 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21212 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21214 break;
21217 case 0:
21218 if (REG_P (x))
21219 fprintf (file, "%s", reg_names[REGNO (x)]);
21220 else if (MEM_P (x))
21222 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21223 know the width from the mode. */
21224 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21225 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21226 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21227 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21228 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21229 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21230 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21231 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21232 else
21233 output_address (GET_MODE (x), XEXP (x, 0));
21235 else
21237 if (toc_relative_expr_p (x, false))
21238 /* This hack along with a corresponding hack in
21239 rs6000_output_addr_const_extra arranges to output addends
21240 where the assembler expects to find them.  E.g.
21241 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21242 without this hack would be output as "x@toc+4". We
21243 want "x+4@toc". */
21244 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21245 else
21246 output_addr_const (file, x);
21248 return;
21250 case '&':
21251 if (const char *name = get_some_local_dynamic_name ())
21252 assemble_name (file, name);
21253 else
21254 output_operand_lossage ("'%%&' used without any "
21255 "local dynamic TLS references");
21256 return;
21258 default:
21259 output_operand_lossage ("invalid %%xn code");
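/* Example uses of the modifiers above in insn templates (hypothetical
   operands): "%L1" prints the second word of a register pair, so
   "mr %0,%1\;mr %L0,%L1" copies a doubleword on 32-bit targets; "%x0"
   prints a VSX register number, so "xxlor %x0,%x1,%x1" becomes
   "xxlor 34,34,34" when operand 0 is Altivec register v2; and
   "lwz%U1%X1 %0,%1" uses the 'U' and 'X' suffixes to select the
   lwzu/lwzx variants from the form of the memory address.  */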
21263 /* Print the address of an operand. */
21265 void
21266 print_operand_address (FILE *file, rtx x)
21268 if (REG_P (x))
21269 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21270 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21271 || GET_CODE (x) == LABEL_REF)
21273 output_addr_const (file, x);
21274 if (small_data_operand (x, GET_MODE (x)))
21275 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21276 reg_names[SMALL_DATA_REG]);
21277 else
21278 gcc_assert (!TARGET_TOC);
21280 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21281 && REG_P (XEXP (x, 1)))
21283 if (REGNO (XEXP (x, 0)) == 0)
21284 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21285 reg_names[ REGNO (XEXP (x, 0)) ]);
21286 else
21287 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21288 reg_names[ REGNO (XEXP (x, 1)) ]);
21290 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21291 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21292 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21293 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21294 #if TARGET_MACHO
21295 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21296 && CONSTANT_P (XEXP (x, 1)))
21298 fprintf (file, "lo16(");
21299 output_addr_const (file, XEXP (x, 1));
21300 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21302 #endif
21303 #if TARGET_ELF
21304 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21305 && CONSTANT_P (XEXP (x, 1)))
21307 output_addr_const (file, XEXP (x, 1));
21308 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21310 #endif
21311 else if (toc_relative_expr_p (x, false))
21313 /* This hack along with a corresponding hack in
21314 rs6000_output_addr_const_extra arranges to output addends
21315 where the assembler expects to find them.  E.g.
21316 (lo_sum (reg 9)
21317 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21318 without this hack would be output as "x@toc+8@l(9)". We
21319 want "x+8@toc@l(9)". */
21320 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
21321 if (GET_CODE (x) == LO_SUM)
21322 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21323 else
21324 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
21326 else
21327 gcc_unreachable ();
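/* Samples of the forms handled above (hypothetical registers):
	(reg 9)				-> "0(9)"
	(plus (reg 9) (const_int 16))	-> "16(9)"
	(plus (reg 9) (reg 10))		-> "9,10"
   When the first register is r0 the operands are swapped, because r0 in
   the RA slot of an indexed access reads as literal zero.  */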
21330 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
21332 static bool
21333 rs6000_output_addr_const_extra (FILE *file, rtx x)
21335 if (GET_CODE (x) == UNSPEC)
21336 switch (XINT (x, 1))
21338 case UNSPEC_TOCREL:
21339 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21340 && REG_P (XVECEXP (x, 0, 1))
21341 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21342 output_addr_const (file, XVECEXP (x, 0, 0));
21343 if (x == tocrel_base && tocrel_offset != const0_rtx)
21345 if (INTVAL (tocrel_offset) >= 0)
21346 fprintf (file, "+");
21347 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
21349 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21351 putc ('-', file);
21352 assemble_name (file, toc_label_name);
21353 need_toc_init = 1;
21355 else if (TARGET_ELF)
21356 fputs ("@toc", file);
21357 return true;
21359 #if TARGET_MACHO
21360 case UNSPEC_MACHOPIC_OFFSET:
21361 output_addr_const (file, XVECEXP (x, 0, 0));
21362 putc ('-', file);
21363 machopic_output_function_base_name (file);
21364 return true;
21365 #endif
21367 return false;
21370 /* Target hook for assembling integer objects. The PowerPC version has
21371 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21372 is defined. It also needs to handle DI-mode objects on 64-bit
21373 targets. */
21375 static bool
21376 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21378 #ifdef RELOCATABLE_NEEDS_FIXUP
21379 /* Special handling for SI values. */
21380 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21382 static int recurse = 0;
21384 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21385 the .fixup section. Since the TOC section is already relocated, we
21386 don't need to mark it here. We used to skip the text section, but it
21387 should never be valid for relocated addresses to be placed in the text
21388 section. */
21389 if (DEFAULT_ABI == ABI_V4
21390 && (TARGET_RELOCATABLE || flag_pic > 1)
21391 && in_section != toc_section
21392 && !recurse
21393 && !CONST_SCALAR_INT_P (x)
21394 && CONSTANT_P (x))
21396 char buf[256];
21398 recurse = 1;
21399 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21400 fixuplabelno++;
21401 ASM_OUTPUT_LABEL (asm_out_file, buf);
21402 fprintf (asm_out_file, "\t.long\t(");
21403 output_addr_const (asm_out_file, x);
21404 fprintf (asm_out_file, ")@fixup\n");
21405 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21406 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21407 fprintf (asm_out_file, "\t.long\t");
21408 assemble_name (asm_out_file, buf);
21409 fprintf (asm_out_file, "\n\t.previous\n");
21410 recurse = 0;
21411 return true;
21413 /* Remove initial .'s to turn a -mcall-aixdesc function
21414 address into the address of the descriptor, not the function
21415 itself. */
21416 else if (GET_CODE (x) == SYMBOL_REF
21417 && XSTR (x, 0)[0] == '.'
21418 && DEFAULT_ABI == ABI_AIX)
21420 const char *name = XSTR (x, 0);
21421 while (*name == '.')
21422 name++;
21424 fprintf (asm_out_file, "\t.long\t%s\n", name);
21425 return true;
21428 #endif /* RELOCATABLE_NEEDS_FIXUP */
21429 return default_assemble_integer (x, size, aligned_p);
21432 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21433 /* Emit an assembler directive to set symbol visibility for DECL to
21434 VISIBILITY_TYPE. */
21436 static void
21437 rs6000_assemble_visibility (tree decl, int vis)
21439 if (TARGET_XCOFF)
21440 return;
21442 /* Functions need to have their entry point symbol visibility set as
21443 well as their descriptor symbol visibility. */
21444 if (DEFAULT_ABI == ABI_AIX
21445 && DOT_SYMBOLS
21446 && TREE_CODE (decl) == FUNCTION_DECL)
21448 static const char * const visibility_types[] = {
21449 NULL, "internal", "hidden", "protected"
21452 const char *name, *type;
21454 name = ((* targetm.strip_name_encoding)
21455 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21456 type = visibility_types[vis];
21458 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21459 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21461 else
21462 default_assemble_visibility (decl, vis);
21464 #endif
21466 enum rtx_code
21467 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21469 /* Reversal of FP compares takes care -- an ordered compare
21470 becomes an unordered compare and vice versa. */
21471 if (mode == CCFPmode
21472 && (!flag_finite_math_only
21473 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21474 || code == UNEQ || code == LTGT))
21475 return reverse_condition_maybe_unordered (code);
21476 else
21477 return reverse_condition (code);
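/* E.g. reversing GE in CCFPmode must yield UNLT rather than LT: with a
   NaN operand both GE and LT are false, so only the maybe-unordered
   reversal keeps "reversed test true iff original test false".  */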
21480 /* Generate a compare for CODE. Return a brand-new rtx that
21481 represents the result of the compare. */
21483 static rtx
21484 rs6000_generate_compare (rtx cmp, machine_mode mode)
21486 machine_mode comp_mode;
21487 rtx compare_result;
21488 enum rtx_code code = GET_CODE (cmp);
21489 rtx op0 = XEXP (cmp, 0);
21490 rtx op1 = XEXP (cmp, 1);
21492 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21493 comp_mode = CCmode;
21494 else if (FLOAT_MODE_P (mode))
21495 comp_mode = CCFPmode;
21496 else if (code == GTU || code == LTU
21497 || code == GEU || code == LEU)
21498 comp_mode = CCUNSmode;
21499 else if ((code == EQ || code == NE)
21500 && unsigned_reg_p (op0)
21501 && (unsigned_reg_p (op1)
21502 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21503 /* These are unsigned values, perhaps there will be a later
21504 ordering compare that can be shared with this one. */
21505 comp_mode = CCUNSmode;
21506 else
21507 comp_mode = CCmode;
21509 /* If we have an unsigned compare, make sure we don't have a signed value as
21510 an immediate. */
21511 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21512 && INTVAL (op1) < 0)
21514 op0 = copy_rtx_if_shared (op0);
21515 op1 = force_reg (GET_MODE (op0), op1);
21516 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
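/* E.g. (gtu (reg:SI 9) (const_int -5)) cannot be emitted as "cmplwi",
   whose immediate field is zero-extended, so the constant is forced
   into a register first and compared with "cmplw" (hypothetical
   operands).  */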
21519 /* First, the compare. */
21520 compare_result = gen_reg_rtx (comp_mode);
21522 /* E500 FP compare instructions on the GPRs. Yuck! */
21523 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
21524 && FLOAT_MODE_P (mode))
21526 rtx cmp, or_result, compare_result2;
21527 machine_mode op_mode = GET_MODE (op0);
21528 bool reverse_p;
21530 if (op_mode == VOIDmode)
21531 op_mode = GET_MODE (op1);
21533 /* First reverse the condition codes that aren't directly supported. */
21534 switch (code)
21536 case NE:
21537 case UNLT:
21538 case UNLE:
21539 case UNGT:
21540 case UNGE:
21541 code = reverse_condition_maybe_unordered (code);
21542 reverse_p = true;
21543 break;
21545 case EQ:
21546 case LT:
21547 case LE:
21548 case GT:
21549 case GE:
21550 reverse_p = false;
21551 break;
21553 default:
21554 gcc_unreachable ();
21557 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
21558 This explains the following mess. */
21560 switch (code)
21562 case EQ:
21563 switch (op_mode)
21565 case SFmode:
21566 cmp = (flag_finite_math_only && !flag_trapping_math)
21567 ? gen_tstsfeq_gpr (compare_result, op0, op1)
21568 : gen_cmpsfeq_gpr (compare_result, op0, op1);
21569 break;
21571 case DFmode:
21572 cmp = (flag_finite_math_only && !flag_trapping_math)
21573 ? gen_tstdfeq_gpr (compare_result, op0, op1)
21574 : gen_cmpdfeq_gpr (compare_result, op0, op1);
21575 break;
21577 case TFmode:
21578 case IFmode:
21579 case KFmode:
21580 cmp = (flag_finite_math_only && !flag_trapping_math)
21581 ? gen_tsttfeq_gpr (compare_result, op0, op1)
21582 : gen_cmptfeq_gpr (compare_result, op0, op1);
21583 break;
21585 default:
21586 gcc_unreachable ();
21588 break;
21590 case GT:
21591 case GE:
21592 switch (op_mode)
21594 case SFmode:
21595 cmp = (flag_finite_math_only && !flag_trapping_math)
21596 ? gen_tstsfgt_gpr (compare_result, op0, op1)
21597 : gen_cmpsfgt_gpr (compare_result, op0, op1);
21598 break;
21600 case DFmode:
21601 cmp = (flag_finite_math_only && !flag_trapping_math)
21602 ? gen_tstdfgt_gpr (compare_result, op0, op1)
21603 : gen_cmpdfgt_gpr (compare_result, op0, op1);
21604 break;
21606 case TFmode:
21607 case IFmode:
21608 case KFmode:
21609 cmp = (flag_finite_math_only && !flag_trapping_math)
21610 ? gen_tsttfgt_gpr (compare_result, op0, op1)
21611 : gen_cmptfgt_gpr (compare_result, op0, op1);
21612 break;
21614 default:
21615 gcc_unreachable ();
21617 break;
21619 case LT:
21620 case LE:
21621 switch (op_mode)
21623 case SFmode:
21624 cmp = (flag_finite_math_only && !flag_trapping_math)
21625 ? gen_tstsflt_gpr (compare_result, op0, op1)
21626 : gen_cmpsflt_gpr (compare_result, op0, op1);
21627 break;
21629 case DFmode:
21630 cmp = (flag_finite_math_only && !flag_trapping_math)
21631 ? gen_tstdflt_gpr (compare_result, op0, op1)
21632 : gen_cmpdflt_gpr (compare_result, op0, op1);
21633 break;
21635 case TFmode:
21636 case IFmode:
21637 case KFmode:
21638 cmp = (flag_finite_math_only && !flag_trapping_math)
21639 ? gen_tsttflt_gpr (compare_result, op0, op1)
21640 : gen_cmptflt_gpr (compare_result, op0, op1);
21641 break;
21643 default:
21644 gcc_unreachable ();
21646 break;
21648 default:
21649 gcc_unreachable ();
21652 /* Synthesize LE and GE from LT/GT || EQ. */
21653 if (code == LE || code == GE)
21655 emit_insn (cmp);
21657 compare_result2 = gen_reg_rtx (CCFPmode);
21659 /* Do the EQ. */
21660 switch (op_mode)
21662 case SFmode:
21663 cmp = (flag_finite_math_only && !flag_trapping_math)
21664 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
21665 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
21666 break;
21668 case DFmode:
21669 cmp = (flag_finite_math_only && !flag_trapping_math)
21670 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
21671 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
21672 break;
21674 case TFmode:
21675 case IFmode:
21676 case KFmode:
21677 cmp = (flag_finite_math_only && !flag_trapping_math)
21678 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
21679 : gen_cmptfeq_gpr (compare_result2, op0, op1);
21680 break;
21682 default:
21683 gcc_unreachable ();
21686 emit_insn (cmp);
21688 /* OR them together. */
21689 or_result = gen_reg_rtx (CCFPmode);
21690 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
21691 compare_result2);
21692 compare_result = or_result;
21695 code = reverse_p ? NE : EQ;
21697 emit_insn (cmp);
21700 /* IEEE 128-bit support in VSX registers when we do not have hardware
21701 support. */
21702 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21704 rtx libfunc = NULL_RTX;
21705 bool uneq_or_ltgt = false;
21706 rtx dest = gen_reg_rtx (SImode);
21708 switch (code)
21710 case EQ:
21711 case NE:
21712 libfunc = optab_libfunc (eq_optab, mode);
21713 break;
21715 case GT:
21716 case GE:
21717 libfunc = optab_libfunc (ge_optab, mode);
21718 break;
21720 case LT:
21721 case LE:
21722 libfunc = optab_libfunc (le_optab, mode);
21723 break;
21725 case UNORDERED:
21726 case ORDERED:
21727 libfunc = optab_libfunc (unord_optab, mode);
21728 code = (code == UNORDERED) ? NE : EQ;
21729 break;
21731 case UNGE:
21732 case UNGT:
21733 libfunc = optab_libfunc (le_optab, mode);
21734 code = (code == UNGE) ? GE : GT;
21735 break;
21737 case UNLE:
21738 case UNLT:
21739 libfunc = optab_libfunc (ge_optab, mode);
21740 code = (code == UNLE) ? LE : LT;
21741 break;
21743 case UNEQ:
21744 case LTGT:
21745 libfunc = optab_libfunc (le_optab, mode);
21746 uneq_or_ltgt = true;
21747 code = (code == UNEQ) ? NE : EQ;
21748 break;
21750 default:
21751 gcc_unreachable ();
21754 gcc_assert (libfunc);
21755 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21756 SImode, 2, op0, mode, op1, mode);
21758 /* If this is UNEQ or LTGT, we call __lekf2, which returns -1 for less
21759 than, 0 for equal, +1 for greater, and +2 for NaN.  We add 1 to give
21760 a value of 0..3, and then do an AND immediate of 1 to isolate whether
21761 it is equal/NaN (i.e. bottom bit is 1), or less than/greater
21762 than (i.e. bottom bit is 0). */
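      /* Worked table for the comment above (editorial illustration):
	   libcall result   +1    &1
	       -1 (lt)       0     0
		0 (eq)       1     1
	       +1 (gt)       2     0
	       +2 (NaN)      3     1
	 so testing NE against zero implements UNEQ, and EQ implements
	 LTGT.  */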
21763 if (uneq_or_ltgt)
21765 rtx add_result = gen_reg_rtx (SImode);
21766 rtx and_result = gen_reg_rtx (SImode);
21767 emit_insn (gen_addsi3 (add_result, dest, GEN_INT (1)));
21768 emit_insn (gen_andsi3 (and_result, add_result, GEN_INT (1)));
21769 dest = and_result;
21772 emit_insn (gen_rtx_SET (compare_result,
21773 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21776 else
21778 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21779 CLOBBERs to match cmptf_internal2 pattern. */
21780 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21781 && FLOAT128_IBM_P (GET_MODE (op0))
21782 && TARGET_HARD_FLOAT && TARGET_FPRS)
21783 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21784 gen_rtvec (10,
21785 gen_rtx_SET (compare_result,
21786 gen_rtx_COMPARE (comp_mode, op0, op1)),
21787 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21788 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21789 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21790 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21791 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21792 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21793 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21794 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21795 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21796 else if (GET_CODE (op1) == UNSPEC
21797 && XINT (op1, 1) == UNSPEC_SP_TEST)
21799 rtx op1b = XVECEXP (op1, 0, 0);
21800 comp_mode = CCEQmode;
21801 compare_result = gen_reg_rtx (CCEQmode);
21802 if (TARGET_64BIT)
21803 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21804 else
21805 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21807 else
21808 emit_insn (gen_rtx_SET (compare_result,
21809 gen_rtx_COMPARE (comp_mode, op0, op1)));
21812 /* Some kinds of FP comparisons need an OR operation;
21813 under flag_finite_math_only we don't bother. */
21814 if (FLOAT_MODE_P (mode)
21815 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21816 && !flag_finite_math_only
21817 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
21818 && (code == LE || code == GE
21819 || code == UNEQ || code == LTGT
21820 || code == UNGT || code == UNLT))
21822 enum rtx_code or1, or2;
21823 rtx or1_rtx, or2_rtx, compare2_rtx;
21824 rtx or_result = gen_reg_rtx (CCEQmode);
21826 switch (code)
21828 case LE: or1 = LT; or2 = EQ; break;
21829 case GE: or1 = GT; or2 = EQ; break;
21830 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21831 case LTGT: or1 = LT; or2 = GT; break;
21832 case UNGT: or1 = UNORDERED; or2 = GT; break;
21833 case UNLT: or1 = UNORDERED; or2 = LT; break;
21834 default: gcc_unreachable ();
21836 validate_condition_mode (or1, comp_mode);
21837 validate_condition_mode (or2, comp_mode);
21838 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21839 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21840 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21841 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21842 const_true_rtx);
21843 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21845 compare_result = or_result;
21846 code = EQ;
21849 validate_condition_mode (code, GET_MODE (compare_result));
21851 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
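/* Usage sketch (editorial, with hypothetical operands): for CMP =
   (gtu:SI r3 r4), the code above allocates a CCUNSmode pseudo, emits the
   compare into it, and returns (gtu CR 0); callers such as
   rs6000_emit_cbranch and rs6000_emit_sCOND wrap that rtx in a branch or
   a set-to-boolean sequence.  */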
21855 /* Return the diagnostic message string if the binary operation OP is
21856 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21858 static const char*
21859 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21860 const_tree type1,
21861 const_tree type2)
21863 enum machine_mode mode1 = TYPE_MODE (type1);
21864 enum machine_mode mode2 = TYPE_MODE (type2);
21866 /* For complex modes, use the inner type. */
21867 if (COMPLEX_MODE_P (mode1))
21868 mode1 = GET_MODE_INNER (mode1);
21870 if (COMPLEX_MODE_P (mode2))
21871 mode2 = GET_MODE_INNER (mode2);
21873 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21874 double to intermix unless -mfloat128-convert. */
21875 if (mode1 == mode2)
21876 return NULL;
21878 if (!TARGET_FLOAT128_CVT)
21880 if ((mode1 == KFmode && mode2 == IFmode)
21881 || (mode1 == IFmode && mode2 == KFmode))
21882 return N_("__float128 and __ibm128 cannot be used in the same "
21883 "expression");
21885 if (TARGET_IEEEQUAD
21886 && ((mode1 == IFmode && mode2 == TFmode)
21887 || (mode1 == TFmode && mode2 == IFmode)))
21888 return N_("__ibm128 and long double cannot be used in the same "
21889 "expression");
21891 if (!TARGET_IEEEQUAD
21892 && ((mode1 == KFmode && mode2 == TFmode)
21893 || (mode1 == TFmode && mode2 == KFmode)))
21894 return N_("__float128 and long double cannot be used in the same "
21895 "expression");
21898 return NULL;
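/* Example (editorial illustration): without -mfloat128-convert, an
   expression mixing the two 128-bit formats, e.g. "__float128 a;
   __ibm128 b; a + b;", reaches this hook with KFmode and IFmode operands
   and is rejected with the first message above; with the option, the
   formats may be converted and mixed.  */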
21902 /* Expand floating point conversion to/from __float128 and __ibm128. */
21904 void
21905 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21907 machine_mode dest_mode = GET_MODE (dest);
21908 machine_mode src_mode = GET_MODE (src);
21909 convert_optab cvt = unknown_optab;
21910 bool do_move = false;
21911 rtx libfunc = NULL_RTX;
21912 rtx dest2;
21913 typedef rtx (*rtx_2func_t) (rtx, rtx);
21914 rtx_2func_t hw_convert = (rtx_2func_t)0;
21915 size_t kf_or_tf;
21917 struct hw_conv_t {
21918 rtx_2func_t from_df;
21919 rtx_2func_t from_sf;
21920 rtx_2func_t from_si_sign;
21921 rtx_2func_t from_si_uns;
21922 rtx_2func_t from_di_sign;
21923 rtx_2func_t from_di_uns;
21924 rtx_2func_t to_df;
21925 rtx_2func_t to_sf;
21926 rtx_2func_t to_si_sign;
21927 rtx_2func_t to_si_uns;
21928 rtx_2func_t to_di_sign;
21929 rtx_2func_t to_di_uns;
21930 } hw_conversions[2] = {
21931 /* conversions to/from KFmode */
21933 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21934 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21935 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21936 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21937 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21938 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21939 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21940 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21941 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21942 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21943 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21944 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21947 /* conversions to/from TFmode */
21949 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21950 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21951 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21952 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21953 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21954 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21955 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21956 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21957 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21958 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21959 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21960 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21964 if (dest_mode == src_mode)
21965 gcc_unreachable ();
21967 /* Eliminate memory operations. */
21968 if (MEM_P (src))
21969 src = force_reg (src_mode, src);
21971 if (MEM_P (dest))
21973 rtx tmp = gen_reg_rtx (dest_mode);
21974 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21975 rs6000_emit_move (dest, tmp, dest_mode);
21976 return;
21979 /* Convert to IEEE 128-bit floating point. */
21980 if (FLOAT128_IEEE_P (dest_mode))
21982 if (dest_mode == KFmode)
21983 kf_or_tf = 0;
21984 else if (dest_mode == TFmode)
21985 kf_or_tf = 1;
21986 else
21987 gcc_unreachable ();
21989 switch (src_mode)
21991 case DFmode:
21992 cvt = sext_optab;
21993 hw_convert = hw_conversions[kf_or_tf].from_df;
21994 break;
21996 case SFmode:
21997 cvt = sext_optab;
21998 hw_convert = hw_conversions[kf_or_tf].from_sf;
21999 break;
22001 case KFmode:
22002 case IFmode:
22003 case TFmode:
22004 if (FLOAT128_IBM_P (src_mode))
22005 cvt = sext_optab;
22006 else
22007 do_move = true;
22008 break;
22010 case SImode:
22011 if (unsigned_p)
22013 cvt = ufloat_optab;
22014 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22016 else
22018 cvt = sfloat_optab;
22019 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22021 break;
22023 case DImode:
22024 if (unsigned_p)
22026 cvt = ufloat_optab;
22027 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22029 else
22031 cvt = sfloat_optab;
22032 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22034 break;
22036 default:
22037 gcc_unreachable ();
22041 /* Convert from IEEE 128-bit floating point. */
22042 else if (FLOAT128_IEEE_P (src_mode))
22044 if (src_mode == KFmode)
22045 kf_or_tf = 0;
22046 else if (src_mode == TFmode)
22047 kf_or_tf = 1;
22048 else
22049 gcc_unreachable ();
22051 switch (dest_mode)
22053 case DFmode:
22054 cvt = trunc_optab;
22055 hw_convert = hw_conversions[kf_or_tf].to_df;
22056 break;
22058 case SFmode:
22059 cvt = trunc_optab;
22060 hw_convert = hw_conversions[kf_or_tf].to_sf;
22061 break;
22063 case KFmode:
22064 case IFmode:
22065 case TFmode:
22066 if (FLOAT128_IBM_P (dest_mode))
22067 cvt = trunc_optab;
22068 else
22069 do_move = true;
22070 break;
22072 case SImode:
22073 if (unsigned_p)
22075 cvt = ufix_optab;
22076 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22078 else
22080 cvt = sfix_optab;
22081 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22083 break;
22085 case DImode:
22086 if (unsigned_p)
22088 cvt = ufix_optab;
22089 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22091 else
22093 cvt = sfix_optab;
22094 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22096 break;
22098 default:
22099 gcc_unreachable ();
22103 /* Both IBM format. */
22104 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22105 do_move = true;
22107 else
22108 gcc_unreachable ();
22110 /* Handle conversion between TFmode/KFmode. */
22111 if (do_move)
22112 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22114 /* Handle conversion if we have hardware support. */
22115 else if (TARGET_FLOAT128_HW && hw_convert)
22116 emit_insn ((hw_convert) (dest, src));
22118 /* Call an external function to do the conversion. */
22119 else if (cvt != unknown_optab)
22121 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22122 gcc_assert (libfunc != NULL_RTX);
22124 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
22125 src_mode);
22127 gcc_assert (dest2 != NULL_RTX);
22128 if (!rtx_equal_p (dest, dest2))
22129 emit_move_insn (dest, dest2);
22132 else
22133 gcc_unreachable ();
22135 return;
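/* Example (editorial sketch): converting KFmode to signed DImode uses
   gen_fix_kfdi2_hw from the table above when TARGET_FLOAT128_HW, and
   otherwise falls back to the sfix_optab libcall (typically __fixkfdi in
   libgcc, though the exact name comes from convert_optab_libfunc).  A
   KFmode <-> TFmode conversion when both formats are IEEE is just the
   do_move register-to-register case.  */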
22138 /* Split a conversion from __float128 to an integer type into separate insns.
22139 OPERANDS points to the destination, source, and V2DI temporary
22140 register. CODE is either FIX or UNSIGNED_FIX. */
22142 void
22143 convert_float128_to_int (rtx *operands, enum rtx_code code)
22145 rtx dest = operands[0];
22146 rtx src = operands[1];
22147 rtx tmp = operands[2];
22148 rtx cvt;
22149 rtvec cvt_vec;
22150 rtx cvt_unspec;
22151 rtvec move_vec;
22152 rtx move_unspec;
22154 if (GET_CODE (tmp) == SCRATCH)
22155 tmp = gen_reg_rtx (V2DImode);
22157 if (MEM_P (dest))
22158 dest = rs6000_address_for_fpconvert (dest);
22160 /* Generate the actual convert insn of the form:
22161 (set (tmp) (unspec:V2DI [(fix:SI (reg:KF))] UNSPEC_IEEE128_CONVERT)). */
22162 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), src);
22163 cvt_vec = gen_rtvec (1, cvt);
22164 cvt_unspec = gen_rtx_UNSPEC (V2DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22165 emit_insn (gen_rtx_SET (tmp, cvt_unspec));
22167 /* Generate the move insn of the form:
22168 (set (dest:SI) (unspec:SI [(tmp:V2DI)] UNSPEC_IEEE128_MOVE)). */
22169 move_vec = gen_rtvec (1, tmp);
22170 move_unspec = gen_rtx_UNSPEC (GET_MODE (dest), move_vec, UNSPEC_IEEE128_MOVE);
22171 emit_insn (gen_rtx_SET (dest, move_unspec));
22174 /* Split a conversion from an integer type to __float128 into separate insns.
22175 OPERANDS points to the destination, source, and V2DI temporary
22176 register. CODE is either FLOAT or UNSIGNED_FLOAT. */
22178 void
22179 convert_int_to_float128 (rtx *operands, enum rtx_code code)
22181 rtx dest = operands[0];
22182 rtx src = operands[1];
22183 rtx tmp = operands[2];
22184 rtx cvt;
22185 rtvec cvt_vec;
22186 rtx cvt_unspec;
22187 rtvec move_vec;
22188 rtx move_unspec;
22189 rtx unsigned_flag;
22191 if (GET_CODE (tmp) == SCRATCH)
22192 tmp = gen_reg_rtx (V2DImode);
22194 if (MEM_P (src))
22195 src = rs6000_address_for_fpconvert (src);
22197 /* Generate the move of the integer into the Altivec register of the form:
22198 (set (tmp:V2DI) (unspec:V2DI [(src:SI)
22199 (const_int 0)] UNSPEC_IEEE128_MOVE)).
22201 or:
22202 (set (tmp:V2DI) (unspec:V2DI [(src:DI)] UNSPEC_IEEE128_MOVE)). */
22204 if (GET_MODE (src) == SImode)
22206 unsigned_flag = (code == UNSIGNED_FLOAT) ? const1_rtx : const0_rtx;
22207 move_vec = gen_rtvec (2, src, unsigned_flag);
22209 else
22210 move_vec = gen_rtvec (1, src);
22212 move_unspec = gen_rtx_UNSPEC (V2DImode, move_vec, UNSPEC_IEEE128_MOVE);
22213 emit_insn (gen_rtx_SET (tmp, move_unspec));
22215 /* Generate the actual convert insn of the form:
22216 (set (dest:KF) (float:KF (unspec:DI [(tmp:V2DI)]
22217 UNSPEC_IEEE128_CONVERT))). */
22218 cvt_vec = gen_rtvec (1, tmp);
22219 cvt_unspec = gen_rtx_UNSPEC (DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
22220 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), cvt_unspec);
22221 emit_insn (gen_rtx_SET (dest, cvt));
22225 /* Emit the RTL for an sISEL pattern. */
22227 void
22228 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22230 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22233 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22234 can be used as that dest register. Return the dest register. */
22236 rtx
22237 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22239 if (op2 == const0_rtx)
22240 return op1;
22242 if (GET_CODE (scratch) == SCRATCH)
22243 scratch = gen_reg_rtx (mode);
22245 if (logical_operand (op2, mode))
22246 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22247 else
22248 emit_insn (gen_rtx_SET (scratch,
22249 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22251 return scratch;
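/* Example (editorial illustration): for OP1 = r9 and OP2 = 5, the
   constant satisfies logical_operand, so this emits scratch = r9 ^ 5;
   scratch is then zero exactly when r9 == 5, which is the property the
   eq/ne expanders built on this helper rely on.  */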
22254 void
22255 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22257 rtx condition_rtx;
22258 machine_mode op_mode;
22259 enum rtx_code cond_code;
22260 rtx result = operands[0];
22262 condition_rtx = rs6000_generate_compare (operands[1], mode);
22263 cond_code = GET_CODE (condition_rtx);
22265 if (FLOAT_MODE_P (mode)
22266 && !TARGET_FPRS && TARGET_HARD_FLOAT)
22268 rtx t;
22270 PUT_MODE (condition_rtx, SImode);
22271 t = XEXP (condition_rtx, 0);
22273 gcc_assert (cond_code == NE || cond_code == EQ);
22275 if (cond_code == NE)
22276 emit_insn (gen_e500_flip_gt_bit (t, t));
22278 emit_insn (gen_move_from_CR_gt_bit (result, t));
22279 return;
22282 if (cond_code == NE
22283 || cond_code == GE || cond_code == LE
22284 || cond_code == GEU || cond_code == LEU
22285 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22287 rtx not_result = gen_reg_rtx (CCEQmode);
22288 rtx not_op, rev_cond_rtx;
22289 machine_mode cc_mode;
22291 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22293 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22294 SImode, XEXP (condition_rtx, 0), const0_rtx);
22295 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22296 emit_insn (gen_rtx_SET (not_result, not_op));
22297 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22300 op_mode = GET_MODE (XEXP (operands[1], 0));
22301 if (op_mode == VOIDmode)
22302 op_mode = GET_MODE (XEXP (operands[1], 1));
22304 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22306 PUT_MODE (condition_rtx, DImode);
22307 convert_move (result, condition_rtx, 0);
22309 else
22311 PUT_MODE (condition_rtx, SImode);
22312 emit_insn (gen_rtx_SET (result, condition_rtx));
22316 /* Emit a conditional branch; OPERANDS[0] is the comparison and OPERANDS[3] the target label. */
22318 void
22319 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22321 rtx condition_rtx, loc_ref;
22323 condition_rtx = rs6000_generate_compare (operands[0], mode);
22324 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22325 emit_jump_insn (gen_rtx_SET (pc_rtx,
22326 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22327 loc_ref, pc_rtx)));
22330 /* Return the string to output a conditional branch to LABEL, which is
22331 the operand template of the label, or NULL if the branch is really a
22332 conditional return.
22334 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22335 condition code register and its mode specifies what kind of
22336 comparison we made.
22338 REVERSED is nonzero if we should reverse the sense of the comparison.
22340 INSN is the insn. */
22342 char *
22343 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22345 static char string[64];
22346 enum rtx_code code = GET_CODE (op);
22347 rtx cc_reg = XEXP (op, 0);
22348 machine_mode mode = GET_MODE (cc_reg);
22349 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22350 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22351 int really_reversed = reversed ^ need_longbranch;
22352 char *s = string;
22353 const char *ccode;
22354 const char *pred;
22355 rtx note;
22357 validate_condition_mode (code, mode);
22359 /* Work out which way this really branches. We could use
22360 reverse_condition_maybe_unordered here always but this
22361 makes the resulting assembler clearer. */
22362 if (really_reversed)
22364 /* Reversal of FP compares needs care -- an ordered compare
22365 becomes an unordered compare and vice versa. */
22366 if (mode == CCFPmode)
22367 code = reverse_condition_maybe_unordered (code);
22368 else
22369 code = reverse_condition (code);
22372 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
22374 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
22375 to the GT bit. */
22376 switch (code)
22378 case EQ:
22379 /* Opposite of GT. */
22380 code = GT;
22381 break;
22383 case NE:
22384 code = UNLE;
22385 break;
22387 default:
22388 gcc_unreachable ();
22392 switch (code)
22394 /* Not all of these are actually distinct opcodes, but
22395 we distinguish them for clarity of the resulting assembler. */
22396 case NE: case LTGT:
22397 ccode = "ne"; break;
22398 case EQ: case UNEQ:
22399 ccode = "eq"; break;
22400 case GE: case GEU:
22401 ccode = "ge"; break;
22402 case GT: case GTU: case UNGT:
22403 ccode = "gt"; break;
22404 case LE: case LEU:
22405 ccode = "le"; break;
22406 case LT: case LTU: case UNLT:
22407 ccode = "lt"; break;
22408 case UNORDERED: ccode = "un"; break;
22409 case ORDERED: ccode = "nu"; break;
22410 case UNGE: ccode = "nl"; break;
22411 case UNLE: ccode = "ng"; break;
22412 default:
22413 gcc_unreachable ();
22416 /* Maybe we have a guess as to how likely the branch is. */
22417 pred = "";
22418 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22419 if (note != NULL_RTX)
22421 /* PROB is the difference from 50%. */
22422 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
22424 /* Only hint for highly probable/improbable branches on newer cpus when
22425 we have real profile data, as static prediction overrides processor
22426 dynamic prediction. For older cpus we may as well always hint, but
22427 assume not taken for branches that are very close to 50% as a
22428 mispredicted taken branch is more expensive than a
22429 mispredicted not-taken branch. */
22430 if (rs6000_always_hint
22431 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22432 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22433 && br_prob_note_reliable_p (note)))
22435 if (abs (prob) > REG_BR_PROB_BASE / 20
22436 && ((prob > 0) ^ need_longbranch))
22437 pred = "+";
22438 else
22439 pred = "-";
22443 if (label == NULL)
22444 s += sprintf (s, "b%slr%s ", ccode, pred);
22445 else
22446 s += sprintf (s, "b%s%s ", ccode, pred);
22448 /* We need to escape any '%' characters in the reg_names string.
22449 Assume they'd only be the first character.... */
22450 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22451 *s++ = '%';
22452 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22454 if (label != NULL)
22456 /* If the branch distance was too far, we may have to use an
22457 unconditional branch to go the distance. */
22458 if (need_longbranch)
22459 s += sprintf (s, ",$+8\n\tb %s", label);
22460 else
22461 s += sprintf (s, ",%s", label);
22464 return string;
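/* Example outputs (editorial; the exact CR spelling depends on
   reg_names): a predicted-taken EQ branch on cr4 prints as "beq+ 4,.L42",
   a conditional return as "beqlr 4", and an out-of-range branch has its
   sense inverted and prints as "bne 4,$+8" followed by an unconditional
   "b .L42".  */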
22467 /* Return the string to flip the GT bit on a CR. */
22468 char *
22469 output_e500_flip_gt_bit (rtx dst, rtx src)
22471 static char string[64];
22472 int a, b;
22474 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
22475 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
22477 /* GT bit. */
22478 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
22479 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
22481 sprintf (string, "crnot %d,%d", a, b);
22482 return string;
22485 /* Return insn for VSX or Altivec comparisons. */
22487 static rtx
22488 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22490 rtx mask;
22491 machine_mode mode = GET_MODE (op0);
22493 switch (code)
22495 default:
22496 break;
22498 case GE:
22499 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22500 return NULL_RTX;
22502 case EQ:
22503 case GT:
22504 case GTU:
22505 case ORDERED:
22506 case UNORDERED:
22507 case UNEQ:
22508 case LTGT:
22509 mask = gen_reg_rtx (mode);
22510 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22511 return mask;
22514 return NULL_RTX;
22517 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22518 DMODE is expected destination mode. This is a recursive function. */
22520 static rtx
22521 rs6000_emit_vector_compare (enum rtx_code rcode,
22522 rtx op0, rtx op1,
22523 machine_mode dmode)
22525 rtx mask;
22526 bool swap_operands = false;
22527 bool try_again = false;
22529 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22530 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22532 /* See if the comparison works as is. */
22533 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22534 if (mask)
22535 return mask;
22537 switch (rcode)
22539 case LT:
22540 rcode = GT;
22541 swap_operands = true;
22542 try_again = true;
22543 break;
22544 case LTU:
22545 rcode = GTU;
22546 swap_operands = true;
22547 try_again = true;
22548 break;
22549 case NE:
22550 case UNLE:
22551 case UNLT:
22552 case UNGE:
22553 case UNGT:
22554 /* Invert condition and try again.
22555 e.g., A != B becomes ~(A==B). */
22557 enum rtx_code rev_code;
22558 enum insn_code nor_code;
22559 rtx mask2;
22561 rev_code = reverse_condition_maybe_unordered (rcode);
22562 if (rev_code == UNKNOWN)
22563 return NULL_RTX;
22565 nor_code = optab_handler (one_cmpl_optab, dmode);
22566 if (nor_code == CODE_FOR_nothing)
22567 return NULL_RTX;
22569 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22570 if (!mask2)
22571 return NULL_RTX;
22573 mask = gen_reg_rtx (dmode);
22574 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22575 return mask;
22577 break;
22578 case GE:
22579 case GEU:
22580 case LE:
22581 case LEU:
22582 /* Try GT/GTU/LT/LTU OR EQ */
22584 rtx c_rtx, eq_rtx;
22585 enum insn_code ior_code;
22586 enum rtx_code new_code;
22588 switch (rcode)
22590 case GE:
22591 new_code = GT;
22592 break;
22594 case GEU:
22595 new_code = GTU;
22596 break;
22598 case LE:
22599 new_code = LT;
22600 break;
22602 case LEU:
22603 new_code = LTU;
22604 break;
22606 default:
22607 gcc_unreachable ();
22610 ior_code = optab_handler (ior_optab, dmode);
22611 if (ior_code == CODE_FOR_nothing)
22612 return NULL_RTX;
22614 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22615 if (!c_rtx)
22616 return NULL_RTX;
22618 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22619 if (!eq_rtx)
22620 return NULL_RTX;
22622 mask = gen_reg_rtx (dmode);
22623 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22624 return mask;
22626 break;
22627 default:
22628 return NULL_RTX;
22631 if (try_again)
22633 if (swap_operands)
22634 std::swap (op0, op1);
22636 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22637 if (mask)
22638 return mask;
22641 /* You only get two chances. */
22642 return NULL_RTX;
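/* Example (editorial illustration): a V4SI LE compare has no single
   instruction, so the LE case above recurses: it builds LT (which itself
   swaps operands and retries as GT) plus EQ and IORs the two masks.  If
   a reversal or the ior/one_cmpl handler is unavailable, the function
   returns NULL_RTX and the caller must fall back.  */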
22645 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22646 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22647 operands for the relation operation COND. */
22649 int
22650 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22651 rtx cond, rtx cc_op0, rtx cc_op1)
22653 machine_mode dest_mode = GET_MODE (dest);
22654 machine_mode mask_mode = GET_MODE (cc_op0);
22655 enum rtx_code rcode = GET_CODE (cond);
22656 machine_mode cc_mode = CCmode;
22657 rtx mask;
22658 rtx cond2;
22659 rtx tmp;
22660 bool invert_move = false;
22662 if (VECTOR_UNIT_NONE_P (dest_mode))
22663 return 0;
22665 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22666 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22668 switch (rcode)
22670 /* Swap operands if we can, and fall back to doing the operation as
22671 specified, and doing a NOR to invert the test. */
22672 case NE:
22673 case UNLE:
22674 case UNLT:
22675 case UNGE:
22676 case UNGT:
22677 /* Invert condition and try again.
22678 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22679 invert_move = true;
22680 rcode = reverse_condition_maybe_unordered (rcode);
22681 if (rcode == UNKNOWN)
22682 return 0;
22683 break;
22685 /* Mark unsigned tests with CCUNSmode. */
22686 case GTU:
22687 case GEU:
22688 case LTU:
22689 case LEU:
22690 cc_mode = CCUNSmode;
22691 break;
22693 default:
22694 break;
22697 /* Get the vector mask for the given relational operations. */
22698 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22700 if (!mask)
22701 return 0;
22703 if (invert_move)
22705 tmp = op_true;
22706 op_true = op_false;
22707 op_false = tmp;
22710 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22711 CONST0_RTX (dest_mode));
22712 emit_insn (gen_rtx_SET (dest,
22713 gen_rtx_IF_THEN_ELSE (dest_mode,
22714 cond2,
22715 op_true,
22716 op_false)));
22717 return 1;
22720 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22721 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of the last
22722 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22723 hardware has no such operation. */
22725 static int
22726 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22728 enum rtx_code code = GET_CODE (op);
22729 rtx op0 = XEXP (op, 0);
22730 rtx op1 = XEXP (op, 1);
22731 machine_mode compare_mode = GET_MODE (op0);
22732 machine_mode result_mode = GET_MODE (dest);
22733 bool max_p = false;
22735 if (result_mode != compare_mode)
22736 return 0;
22738 if (code == GE || code == GT)
22739 max_p = true;
22740 else if (code == LE || code == LT)
22741 max_p = false;
22742 else
22743 return 0;
22745 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22748 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22749 max_p = !max_p;
22751 else
22752 return 0;
22754 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22755 return 1;
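/* Example (editorial illustration): "a >= b ? a : b" in DFmode emits a
   single SMAX here, which the machine description renders as xsmaxcdp;
   writing "a >= b ? b : a" instead flips max_p and yields xsmincdp.  */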
22758 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22759 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the
22760 operands of the last comparison is nonzero/true, FALSE_COND if it is
22761 zero/false. Return 0 if the hardware has no such operation. */
22763 static int
22764 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22766 enum rtx_code code = GET_CODE (op);
22767 rtx op0 = XEXP (op, 0);
22768 rtx op1 = XEXP (op, 1);
22769 machine_mode result_mode = GET_MODE (dest);
22770 rtx compare_rtx;
22771 rtx cmove_rtx;
22772 rtx clobber_rtx;
22774 if (!can_create_pseudo_p ())
22775 return 0;
22777 switch (code)
22779 case EQ:
22780 case GE:
22781 case GT:
22782 break;
22784 case NE:
22785 case LT:
22786 case LE:
22787 code = swap_condition (code);
22788 std::swap (op0, op1);
22789 break;
22791 default:
22792 return 0;
22795 /* Generate: [(parallel [(set (dest)
22796 (if_then_else (op (cmp1) (cmp2))
22797 (true)
22798 (false)))
22799 (clobber (scratch))])]. */
22801 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22802 cmove_rtx = gen_rtx_SET (dest,
22803 gen_rtx_IF_THEN_ELSE (result_mode,
22804 compare_rtx,
22805 true_cond,
22806 false_cond));
22808 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22809 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22810 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22812 return 1;
22815 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22816 operands of the last comparison is nonzero/true, FALSE_COND if it
22817 is zero/false. Return 0 if the hardware has no such operation. */
22819 int
22820 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22822 enum rtx_code code = GET_CODE (op);
22823 rtx op0 = XEXP (op, 0);
22824 rtx op1 = XEXP (op, 1);
22825 machine_mode compare_mode = GET_MODE (op0);
22826 machine_mode result_mode = GET_MODE (dest);
22827 rtx temp;
22828 bool is_against_zero;
22830 /* These modes should always match. */
22831 if (GET_MODE (op1) != compare_mode
22832 /* In the isel case however, we can use a compare immediate, so
22833 op1 may be a small constant. */
22834 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22835 return 0;
22836 if (GET_MODE (true_cond) != result_mode)
22837 return 0;
22838 if (GET_MODE (false_cond) != result_mode)
22839 return 0;
22841 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22842 if (TARGET_P9_MINMAX
22843 && (compare_mode == SFmode || compare_mode == DFmode)
22844 && (result_mode == SFmode || result_mode == DFmode))
22846 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22847 return 1;
22849 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22850 return 1;
22853 /* Don't allow using floating point comparisons for integer results for
22854 now. */
22855 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22856 return 0;
22858 /* First, work out if the hardware can do this at all, or
22859 if it's too slow.... */
22860 if (!FLOAT_MODE_P (compare_mode))
22862 if (TARGET_ISEL)
22863 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22864 return 0;
22866 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
22867 && SCALAR_FLOAT_MODE_P (compare_mode))
22868 return 0;
22870 is_against_zero = op1 == CONST0_RTX (compare_mode);
22872 /* A floating-point subtract might overflow, underflow, or produce
22873 an inexact result, thus changing the floating-point flags, so it
22874 can't be generated if we care about that. It's safe if one side
22875 of the construct is zero, since then no subtract will be
22876 generated. */
22877 if (SCALAR_FLOAT_MODE_P (compare_mode)
22878 && flag_trapping_math && ! is_against_zero)
22879 return 0;
22881 /* Eliminate half of the comparisons by switching operands; this
22882 makes the remaining code simpler. */
22883 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22884 || code == LTGT || code == LT || code == UNLE)
22886 code = reverse_condition_maybe_unordered (code);
22887 temp = true_cond;
22888 true_cond = false_cond;
22889 false_cond = temp;
22892 /* UNEQ and LTGT take four instructions for a comparison with zero,
22893 so it'll probably be faster to use a branch here too. */
22894 if (code == UNEQ && HONOR_NANS (compare_mode))
22895 return 0;
22897 /* We're going to try to implement comparisons by performing
22898 a subtract, then comparing against zero. Unfortunately,
22899 Inf - Inf is NaN which is not zero, and so if we don't
22900 know that the operand is finite and the comparison
22901 would treat EQ differently from UNORDERED, we can't do it. */
22902 if (HONOR_INFINITIES (compare_mode)
22903 && code != GT && code != UNGE
22904 && (GET_CODE (op1) != CONST_DOUBLE
22905 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22906 /* Constructs of the form (a OP b ? a : b) are safe. */
22907 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22908 || (! rtx_equal_p (op0, true_cond)
22909 && ! rtx_equal_p (op1, true_cond))))
22910 return 0;
22912 /* At this point we know we can use fsel. */
22914 /* Reduce the comparison to a comparison against zero. */
22915 if (! is_against_zero)
22917 temp = gen_reg_rtx (compare_mode);
22918 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22919 op0 = temp;
22920 op1 = CONST0_RTX (compare_mode);
22923 /* If we don't care about NaNs we can reduce some of the comparisons
22924 down to faster ones. */
22925 if (! HONOR_NANS (compare_mode))
22926 switch (code)
22928 case GT:
22929 code = LE;
22930 temp = true_cond;
22931 true_cond = false_cond;
22932 false_cond = temp;
22933 break;
22934 case UNGE:
22935 code = GE;
22936 break;
22937 case UNEQ:
22938 code = EQ;
22939 break;
22940 default:
22941 break;
22944 /* Now, reduce everything down to a GE. */
22945 switch (code)
22947 case GE:
22948 break;
22950 case LE:
22951 temp = gen_reg_rtx (compare_mode);
22952 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22953 op0 = temp;
22954 break;
22956 case ORDERED:
22957 temp = gen_reg_rtx (compare_mode);
22958 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22959 op0 = temp;
22960 break;
22962 case EQ:
22963 temp = gen_reg_rtx (compare_mode);
22964 emit_insn (gen_rtx_SET (temp,
22965 gen_rtx_NEG (compare_mode,
22966 gen_rtx_ABS (compare_mode, op0))));
22967 op0 = temp;
22968 break;
22970 case UNGE:
22971 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22972 temp = gen_reg_rtx (result_mode);
22973 emit_insn (gen_rtx_SET (temp,
22974 gen_rtx_IF_THEN_ELSE (result_mode,
22975 gen_rtx_GE (VOIDmode,
22976 op0, op1),
22977 true_cond, false_cond)));
22978 false_cond = true_cond;
22979 true_cond = temp;
22981 temp = gen_reg_rtx (compare_mode);
22982 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22983 op0 = temp;
22984 break;
22986 case GT:
22987 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22988 temp = gen_reg_rtx (result_mode);
22989 emit_insn (gen_rtx_SET (temp,
22990 gen_rtx_IF_THEN_ELSE (result_mode,
22991 gen_rtx_GE (VOIDmode,
22992 op0, op1),
22993 true_cond, false_cond)));
22994 true_cond = false_cond;
22995 false_cond = temp;
22997 temp = gen_reg_rtx (compare_mode);
22998 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22999 op0 = temp;
23000 break;
23002 default:
23003 gcc_unreachable ();
23006 emit_insn (gen_rtx_SET (dest,
23007 gen_rtx_IF_THEN_ELSE (result_mode,
23008 gen_rtx_GE (VOIDmode,
23009 op0, op1),
23010 true_cond, false_cond)));
23011 return 1;
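/* Example (editorial illustration): "x <= 0.0 ? a : b" in DFmode reaches
   the LE arm above, which negates x so the test becomes -x >= 0.0 and a
   single fsel-style (if_then_else (ge ...)) suffices; the UNGE and GT
   arms need the second if_then_else annotated above.  */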
23014 /* Same as above, but for ints (isel). */
23016 static int
23017 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23019 rtx condition_rtx, cr;
23020 machine_mode mode = GET_MODE (dest);
23021 enum rtx_code cond_code;
23022 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23023 bool signedp;
23025 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23026 return 0;
23028 /* We still have to do the compare, because isel doesn't do a
23029 compare; it just looks at the CRx bits set by a previous compare
23030 instruction. */
23031 condition_rtx = rs6000_generate_compare (op, mode);
23032 cond_code = GET_CODE (condition_rtx);
23033 cr = XEXP (condition_rtx, 0);
23034 signedp = GET_MODE (cr) == CCmode;
23036 isel_func = (mode == SImode
23037 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23038 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23040 switch (cond_code)
23042 case LT: case GT: case LTU: case GTU: case EQ:
23043 /* isel handles these directly. */
23044 break;
23046 default:
23047 /* We need to swap the sense of the comparison. */
23049 std::swap (false_cond, true_cond);
23050 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23052 break;
23055 false_cond = force_reg (mode, false_cond);
23056 if (true_cond != const0_rtx)
23057 true_cond = force_reg (mode, true_cond);
23059 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23061 return 1;
23064 const char *
23065 output_isel (rtx *operands)
23067 enum rtx_code code;
23069 code = GET_CODE (operands[1]);
23071 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23073 gcc_assert (GET_CODE (operands[2]) == REG
23074 && GET_CODE (operands[3]) == REG);
23075 PUT_CODE (operands[1], reverse_condition (code));
23076 return "isel %0,%3,%2,%j1";
23079 return "isel %0,%2,%3,%j1";
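/* Example (editorial illustration): for operands[1] = (ge ...) the code
   is rewritten to LT in place and the arms are swapped, so
   "isel %0,%3,%2,%j1" realizes the original GE semantics using only the
   LT/GT/EQ bits that isel can test directly.  */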
23082 void
23083 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23085 machine_mode mode = GET_MODE (op0);
23086 enum rtx_code c;
23087 rtx target;
23089 /* VSX/altivec have direct min/max insns. */
23090 if ((code == SMAX || code == SMIN)
23091 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23092 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23094 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23095 return;
23098 if (code == SMAX || code == SMIN)
23099 c = GE;
23100 else
23101 c = GEU;
23103 if (code == SMAX || code == UMAX)
23104 target = emit_conditional_move (dest, c, op0, op1, mode,
23105 op0, op1, mode, 0);
23106 else
23107 target = emit_conditional_move (dest, c, op0, op1, mode,
23108 op1, op0, mode, 0);
23109 gcc_assert (target);
23110 if (target != dest)
23111 emit_move_insn (dest, target);
23114 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23115 COND is true. Mark the jump as unlikely to be taken. */
23117 static void
23118 emit_unlikely_jump (rtx cond, rtx label)
23120 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
23121 rtx x;
23123 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23124 x = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23125 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
23128 /* A subroutine of the atomic operation splitters. Emit a load-locked
23129 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23130 the zero_extend operation. */
23132 static void
23133 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23135 rtx (*fn) (rtx, rtx) = NULL;
23137 switch (mode)
23139 case QImode:
23140 fn = gen_load_lockedqi;
23141 break;
23142 case HImode:
23143 fn = gen_load_lockedhi;
23144 break;
23145 case SImode:
23146 if (GET_MODE (mem) == QImode)
23147 fn = gen_load_lockedqi_si;
23148 else if (GET_MODE (mem) == HImode)
23149 fn = gen_load_lockedhi_si;
23150 else
23151 fn = gen_load_lockedsi;
23152 break;
23153 case DImode:
23154 fn = gen_load_lockeddi;
23155 break;
23156 case TImode:
23157 fn = gen_load_lockedti;
23158 break;
23159 default:
23160 gcc_unreachable ();
23162 emit_insn (fn (reg, mem));
23165 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23166 instruction in MODE. */
23168 static void
23169 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23171 rtx (*fn) (rtx, rtx, rtx) = NULL;
23173 switch (mode)
23175 case QImode:
23176 fn = gen_store_conditionalqi;
23177 break;
23178 case HImode:
23179 fn = gen_store_conditionalhi;
23180 break;
23181 case SImode:
23182 fn = gen_store_conditionalsi;
23183 break;
23184 case DImode:
23185 fn = gen_store_conditionaldi;
23186 break;
23187 case TImode:
23188 fn = gen_store_conditionalti;
23189 break;
23190 default:
23191 gcc_unreachable ();
23194 /* Emit sync before stwcx. to address PPC405 Erratum. */
23195 if (PPC405_ERRATUM77)
23196 emit_insn (gen_hwsync ());
23198 emit_insn (fn (res, mem, val));
23201 /* Expand barriers before and after a load_locked/store_cond sequence. */
23203 static rtx
23204 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23206 rtx addr = XEXP (mem, 0);
23207 int strict_p = (reload_in_progress || reload_completed);
23209 if (!legitimate_indirect_address_p (addr, strict_p)
23210 && !legitimate_indexed_address_p (addr, strict_p))
23212 addr = force_reg (Pmode, addr);
23213 mem = replace_equiv_address_nv (mem, addr);
23216 switch (model)
23218 case MEMMODEL_RELAXED:
23219 case MEMMODEL_CONSUME:
23220 case MEMMODEL_ACQUIRE:
23221 break;
23222 case MEMMODEL_RELEASE:
23223 case MEMMODEL_ACQ_REL:
23224 emit_insn (gen_lwsync ());
23225 break;
23226 case MEMMODEL_SEQ_CST:
23227 emit_insn (gen_hwsync ());
23228 break;
23229 default:
23230 gcc_unreachable ();
23232 return mem;
23235 static void
23236 rs6000_post_atomic_barrier (enum memmodel model)
23238 switch (model)
23240 case MEMMODEL_RELAXED:
23241 case MEMMODEL_CONSUME:
23242 case MEMMODEL_RELEASE:
23243 break;
23244 case MEMMODEL_ACQUIRE:
23245 case MEMMODEL_ACQ_REL:
23246 case MEMMODEL_SEQ_CST:
23247 emit_insn (gen_isync ());
23248 break;
23249 default:
23250 gcc_unreachable ();
23254 /* A subroutine of the various atomic expanders. For sub-word operations,
23255 we must adjust things to operate on SImode. Given the original MEM,
23256 return a new aligned memory. Also build and return the quantities by
23257 which to shift and mask. */
23259 static rtx
23260 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23262 rtx addr, align, shift, mask, mem;
23263 HOST_WIDE_INT shift_mask;
23264 machine_mode mode = GET_MODE (orig_mem);
23266 /* For smaller modes, we have to implement this via SImode. */
23267 shift_mask = (mode == QImode ? 0x18 : 0x10);
23269 addr = XEXP (orig_mem, 0);
23270 addr = force_reg (GET_MODE (addr), addr);
23272 /* Aligned memory containing subword. Generate a new memory. We
23273 do not want any of the existing MEM_ATTR data, as we're now
23274 accessing memory outside the original object. */
23275 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23276 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23277 mem = gen_rtx_MEM (SImode, align);
23278 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23279 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23280 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23282 /* Shift amount for subword relative to aligned word. */
23283 shift = gen_reg_rtx (SImode);
23284 addr = gen_lowpart (SImode, addr);
23285 rtx tmp = gen_reg_rtx (SImode);
23286 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23287 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23288 if (BYTES_BIG_ENDIAN)
23289 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23290 shift, 1, OPTAB_LIB_WIDEN);
23291 *pshift = shift;
23293 /* Mask for insertion. */
23294 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23295 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23296 *pmask = mask;
23298 return mem;
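/* Worked example (editorial, hypothetical address): for a QImode access
   at byte offset 1 within its word, shift = (addr << 3) & 0x18 = 8 on
   little-endian; big-endian XORs with 0x18, giving 16, since byte 1 then
   occupies bits 16..23 of the aligned SImode word.  The returned mask is
   0xff shifted into that position.  */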
23301 /* A subroutine of the various atomic expanders. For sub-word operands,
23302 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23304 static rtx
23305 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23307 rtx x;
23309 x = gen_reg_rtx (SImode);
23310 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23311 gen_rtx_NOT (SImode, mask),
23312 oldval)));
23314 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23316 return x;
23319 /* A subroutine of the various atomic expanders. For sub-word operands,
23320 extract WIDE to NARROW via SHIFT. */
23322 static void
23323 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23325 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23326 wide, 1, OPTAB_LIB_WIDEN);
23327 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23330 /* Expand an atomic compare and swap operation. */
23332 void
23333 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23335 rtx boolval, retval, mem, oldval, newval, cond;
23336 rtx label1, label2, x, mask, shift;
23337 machine_mode mode, orig_mode;
23338 enum memmodel mod_s, mod_f;
23339 bool is_weak;
23341 boolval = operands[0];
23342 retval = operands[1];
23343 mem = operands[2];
23344 oldval = operands[3];
23345 newval = operands[4];
23346 is_weak = (INTVAL (operands[5]) != 0);
23347 mod_s = memmodel_base (INTVAL (operands[6]));
23348 mod_f = memmodel_base (INTVAL (operands[7]));
23349 orig_mode = mode = GET_MODE (mem);
23351 mask = shift = NULL_RTX;
23352 if (mode == QImode || mode == HImode)
23354 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23355 lwarx and shift/mask operations. With power8, we need to do the
23356 comparison in SImode, but the store is still done in QI/HImode. */
23357 oldval = convert_modes (SImode, mode, oldval, 1);
23359 if (!TARGET_SYNC_HI_QI)
23361 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23363 /* Shift and mask OLDVAL into position within the word. */
23364 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23365 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23367 /* Shift and mask NEWVAL into position within the word. */
23368 newval = convert_modes (SImode, mode, newval, 1);
23369 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23370 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23373 /* Prepare to adjust the return value. */
23374 retval = gen_reg_rtx (SImode);
23375 mode = SImode;
23377 else if (reg_overlap_mentioned_p (retval, oldval))
23378 oldval = copy_to_reg (oldval);
23380 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23381 oldval = copy_to_mode_reg (mode, oldval);
23383 if (reg_overlap_mentioned_p (retval, newval))
23384 newval = copy_to_reg (newval);
23386 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23388 label1 = NULL_RTX;
23389 if (!is_weak)
23391 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23392 emit_label (XEXP (label1, 0));
23394 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23396 emit_load_locked (mode, retval, mem);
23398 x = retval;
23399 if (mask)
23400 x = expand_simple_binop (SImode, AND, retval, mask,
23401 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23403 cond = gen_reg_rtx (CCmode);
23404 /* If we have TImode, synthesize a comparison. */
23405 if (mode != TImode)
23406 x = gen_rtx_COMPARE (CCmode, x, oldval);
23407 else
23409 rtx xor1_result = gen_reg_rtx (DImode);
23410 rtx xor2_result = gen_reg_rtx (DImode);
23411 rtx or_result = gen_reg_rtx (DImode);
23412 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23413 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23414 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23415 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23417 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23418 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23419 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23420 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23423 emit_insn (gen_rtx_SET (cond, x));
23425 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23426 emit_unlikely_jump (x, label2);
23428 x = newval;
23429 if (mask)
23430 x = rs6000_mask_atomic_subword (retval, newval, mask);
23432 emit_store_conditional (orig_mode, cond, mem, x);
23434 if (!is_weak)
23436 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23437 emit_unlikely_jump (x, label1);
23440 if (!is_mm_relaxed (mod_f))
23441 emit_label (XEXP (label2, 0));
23443 rs6000_post_atomic_barrier (mod_s);
23445 if (is_mm_relaxed (mod_f))
23446 emit_label (XEXP (label2, 0));
23448 if (shift)
23449 rs6000_finish_atomic_subword (operands[1], retval, shift);
23450 else if (mode != GET_MODE (operands[1]))
23451 convert_move (operands[1], retval, 1);
23453 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23454 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23455 emit_insn (gen_rtx_SET (boolval, x));
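/* Resulting shape (editorial sketch, SImode strong CAS with seq_cst): a
   hwsync, then a loop of load-locked / compare / branch-out /
   store-conditional / branch-back, then the failure label and the isync
   emitted by rs6000_post_atomic_barrier; as noted above, CR0 holds EQ on
   success.  */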
23458 /* Expand an atomic exchange operation. */
23460 void
23461 rs6000_expand_atomic_exchange (rtx operands[])
23463 rtx retval, mem, val, cond;
23464 machine_mode mode;
23465 enum memmodel model;
23466 rtx label, x, mask, shift;
23468 retval = operands[0];
23469 mem = operands[1];
23470 val = operands[2];
23471 model = memmodel_base (INTVAL (operands[3]));
23472 mode = GET_MODE (mem);
23474 mask = shift = NULL_RTX;
23475 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23477 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23479 /* Shift and mask VAL into position within the word. */
23480 val = convert_modes (SImode, mode, val, 1);
23481 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23482 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23484 /* Prepare to adjust the return value. */
23485 retval = gen_reg_rtx (SImode);
23486 mode = SImode;
23489 mem = rs6000_pre_atomic_barrier (mem, model);
23491 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23492 emit_label (XEXP (label, 0));
23494 emit_load_locked (mode, retval, mem);
23496 x = val;
23497 if (mask)
23498 x = rs6000_mask_atomic_subword (retval, val, mask);
23500 cond = gen_reg_rtx (CCmode);
23501 emit_store_conditional (mode, cond, mem, x);
23503 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23504 emit_unlikely_jump (x, label);
23506 rs6000_post_atomic_barrier (model);
23508 if (shift)
23509 rs6000_finish_atomic_subword (operands[0], retval, shift);
23512 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23513 to perform. MEM is the memory on which to operate. VAL is the second
23514 operand of the binary operator. BEFORE and AFTER are optional locations to
23515 return the value of MEM either before or after the operation. MODEL_RTX
23516 is a CONST_INT containing the memory model to use. */
23518 void
23519 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23520 rtx orig_before, rtx orig_after, rtx model_rtx)
23522 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23523 machine_mode mode = GET_MODE (mem);
23524 machine_mode store_mode = mode;
23525 rtx label, x, cond, mask, shift;
23526 rtx before = orig_before, after = orig_after;
23528 mask = shift = NULL_RTX;
23529 /* On power8, we want to use SImode for the operation. On previous systems,
23530 do the operation on a full SImode word and shift/mask to get the proper byte or
23531 halfword. */
23532 if (mode == QImode || mode == HImode)
23534 if (TARGET_SYNC_HI_QI)
23536 val = convert_modes (SImode, mode, val, 1);
23538 /* Prepare to adjust the return value. */
23539 before = gen_reg_rtx (SImode);
23540 if (after)
23541 after = gen_reg_rtx (SImode);
23542 mode = SImode;
23544 else
23546 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23548 /* Shift and mask VAL into position within the word. */
23549 val = convert_modes (SImode, mode, val, 1);
23550 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23551 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23553 switch (code)
23555 case IOR:
23556 case XOR:
23557 /* We've already zero-extended VAL. That is sufficient to
23558 make certain that it does not affect other bits. */
23559 mask = NULL;
23560 break;
23562 case AND:
23563 /* If we make certain that all of the other bits in VAL are
23564 set, that will be sufficient to not affect other bits. */
23565 x = gen_rtx_NOT (SImode, mask);
23566 x = gen_rtx_IOR (SImode, x, val);
23567 emit_insn (gen_rtx_SET (val, x));
23568 mask = NULL;
23569 break;
23571 case NOT:
23572 case PLUS:
23573 case MINUS:
23574 /* These will all affect bits outside the field and need
23575 adjustment via MASK within the loop. */
23576 break;
23578 default:
23579 gcc_unreachable ();
23582 /* Prepare to adjust the return value. */
23583 before = gen_reg_rtx (SImode);
23584 if (after)
23585 after = gen_reg_rtx (SImode);
23586 store_mode = mode = SImode;
23590 mem = rs6000_pre_atomic_barrier (mem, model);
23592 label = gen_label_rtx ();
23593 emit_label (label);
23594 label = gen_rtx_LABEL_REF (VOIDmode, label);
23596 if (before == NULL_RTX)
23597 before = gen_reg_rtx (mode);
23599 emit_load_locked (mode, before, mem);
23601 if (code == NOT)
23603 x = expand_simple_binop (mode, AND, before, val,
23604 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23605 after = expand_simple_unop (mode, NOT, x, after, 1);
23607 else
23609 after = expand_simple_binop (mode, code, before, val,
23610 after, 1, OPTAB_LIB_WIDEN);
23613 x = after;
23614 if (mask)
23616 x = expand_simple_binop (SImode, AND, after, mask,
23617 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23618 x = rs6000_mask_atomic_subword (before, x, mask);
23620 else if (store_mode != mode)
23621 x = convert_modes (store_mode, mode, x, 1);
23623 cond = gen_reg_rtx (CCmode);
23624 emit_store_conditional (store_mode, cond, mem, x);
23626 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23627 emit_unlikely_jump (x, label);
23629 rs6000_post_atomic_barrier (model);
23631 if (shift)
23633 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23634 then do the calculations in a SImode register. */
23635 if (orig_before)
23636 rs6000_finish_atomic_subword (orig_before, before, shift);
23637 if (orig_after)
23638 rs6000_finish_atomic_subword (orig_after, after, shift);
23640 else if (store_mode != mode)
23642 /* QImode/HImode on machines with lbarx/lharx where we do the native
23643 operation and then do the calculations in a SImode register. */
23644 if (orig_before)
23645 convert_move (orig_before, before, 1);
23646 if (orig_after)
23647 convert_move (orig_after, after, 1);
23649 else if (orig_after && after != orig_after)
23650 emit_move_insn (orig_after, after);
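/* Editorial sketch, not part of GCC: a C-level analogue of the subword
   path above for machines without lbarx/lharx.  The byte value is
   shifted into place inside the containing aligned 32-bit word and the
   update is retried until it sticks, just as the expander retries with
   lwarx/stwcx.  The function name, the little-endian shift computation
   and the word-aliasing cast are assumptions of this sketch.  */
#if 0
#include <stdint.h>

static uint8_t
atomic_or_byte_via_word (uint8_t *p, uint8_t val)
{
  uintptr_t addr = (uintptr_t) p;
  uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3);
  unsigned shift = (addr & 3) * 8;	/* little-endian byte offset */
  uint32_t wide_val = (uint32_t) val << shift;

  /* IOR needs no extra mask: the zero-extended value cannot disturb
     the other bytes, matching the IOR/XOR case in the switch above.  */
  uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
  while (!__atomic_compare_exchange_n (word, &old, old | wide_val, 1,
				       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
    ;					/* OLD is refreshed on failure */

  /* Shift the prior contents back down, as rs6000_finish_atomic_subword
     does for the real expansion.  */
  return (uint8_t) (old >> shift);
}
#endif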
23653 /* Emit instructions to move SRC to DST. Called by splitters for
23654 multi-register moves. It will emit at most one instruction for
23655 each register that is accessed; that is, it won't emit li/lis pairs
23656 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23657 register. */
23659 void
23660 rs6000_split_multireg_move (rtx dst, rtx src)
23662 /* The register number of the first register being moved. */
23663 int reg;
23664 /* The mode that is to be moved. */
23665 machine_mode mode;
23666 /* The mode that the move is being done in, and its size. */
23667 machine_mode reg_mode;
23668 int reg_mode_size;
23669 /* The number of registers that will be moved. */
23670 int nregs;
23672 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23673 mode = GET_MODE (dst);
23674 nregs = hard_regno_nregs[reg][mode];
23675 if (FP_REGNO_P (reg))
23676 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23677 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23678 else if (ALTIVEC_REGNO_P (reg))
23679 reg_mode = V16QImode;
23680 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
23681 reg_mode = DFmode;
23682 else
23683 reg_mode = word_mode;
23684 reg_mode_size = GET_MODE_SIZE (reg_mode);
23686 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23688 /* TDmode residing in FP registers is special, since the ISA requires that
23689 the lower-numbered word of a register pair is always the most significant
23690 word, even in little-endian mode. This does not match the usual subreg
23691 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23692 the appropriate constituent registers "by hand" in little-endian mode.
23694 Note we do not need to check for destructive overlap here since TDmode
23695 can only reside in even/odd register pairs. */
23696 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23698 rtx p_src, p_dst;
23699 int i;
23701 for (i = 0; i < nregs; i++)
23703 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23704 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23705 else
23706 p_src = simplify_gen_subreg (reg_mode, src, mode,
23707 i * reg_mode_size);
23709 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23710 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23711 else
23712 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23713 i * reg_mode_size);
23715 emit_insn (gen_rtx_SET (p_dst, p_src));
23718 return;
23721 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23723 /* Move register range backwards, if we might have destructive
23724 overlap. */
23725 int i;
23726 for (i = nregs - 1; i >= 0; i--)
23727 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23728 i * reg_mode_size),
23729 simplify_gen_subreg (reg_mode, src, mode,
23730 i * reg_mode_size)));
23732 else
23734 int i;
23735 int j = -1;
23736 bool used_update = false;
23737 rtx restore_basereg = NULL_RTX;
23739 if (MEM_P (src) && INT_REGNO_P (reg))
23741 rtx breg;
23743 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23744 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23746 rtx delta_rtx;
23747 breg = XEXP (XEXP (src, 0), 0);
23748 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23749 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23750 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23751 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23752 src = replace_equiv_address (src, breg);
23754 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23756 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23758 rtx basereg = XEXP (XEXP (src, 0), 0);
23759 if (TARGET_UPDATE)
23761 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23762 emit_insn (gen_rtx_SET (ndst,
23763 gen_rtx_MEM (reg_mode,
23764 XEXP (src, 0))));
23765 used_update = true;
23767 else
23768 emit_insn (gen_rtx_SET (basereg,
23769 XEXP (XEXP (src, 0), 1)));
23770 src = replace_equiv_address (src, basereg);
23772 else
23774 rtx basereg = gen_rtx_REG (Pmode, reg);
23775 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23776 src = replace_equiv_address (src, basereg);
23780 breg = XEXP (src, 0);
23781 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23782 breg = XEXP (breg, 0);
23784 /* If the base register we are using to address memory is
23785 also a destination reg, then change that register last. */
23786 if (REG_P (breg)
23787 && REGNO (breg) >= REGNO (dst)
23788 && REGNO (breg) < REGNO (dst) + nregs)
23789 j = REGNO (breg) - REGNO (dst);
23791 else if (MEM_P (dst) && INT_REGNO_P (reg))
23793 rtx breg;
23795 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23796 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23798 rtx delta_rtx;
23799 breg = XEXP (XEXP (dst, 0), 0);
23800 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23801 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23802 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23804 /* We have to update the breg before doing the store.
23805 Use store with update, if available. */
23807 if (TARGET_UPDATE)
23809 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23810 emit_insn (TARGET_32BIT
23811 ? (TARGET_POWERPC64
23812 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23813 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23814 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23815 used_update = true;
23817 else
23818 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23819 dst = replace_equiv_address (dst, breg);
23821 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
23822 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23824 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23826 rtx basereg = XEXP (XEXP (dst, 0), 0);
23827 if (TARGET_UPDATE)
23829 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23830 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23831 XEXP (dst, 0)),
23832 nsrc));
23833 used_update = true;
23835 else
23836 emit_insn (gen_rtx_SET (basereg,
23837 XEXP (XEXP (dst, 0), 1)));
23838 dst = replace_equiv_address (dst, basereg);
23840 else
23842 rtx basereg = XEXP (XEXP (dst, 0), 0);
23843 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23844 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23845 && REG_P (basereg)
23846 && REG_P (offsetreg)
23847 && REGNO (basereg) != REGNO (offsetreg));
23848 if (REGNO (basereg) == 0)
23850 rtx tmp = offsetreg;
23851 offsetreg = basereg;
23852 basereg = tmp;
23854 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23855 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23856 dst = replace_equiv_address (dst, basereg);
23859 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23860 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
23863 for (i = 0; i < nregs; i++)
23865 /* Calculate index to next subword. */
23866 ++j;
23867 if (j == nregs)
23868 j = 0;
23870 /* If the compiler already emitted the move of the first word by
23871 store with update, there is no need to do anything. */
23872 if (j == 0 && used_update)
23873 continue;
23875 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23876 j * reg_mode_size),
23877 simplify_gen_subreg (reg_mode, src, mode,
23878 j * reg_mode_size)));
23880 if (restore_basereg != NULL_RTX)
23881 emit_insn (restore_basereg);
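/* Editorial sketch, not part of GCC: the backwards loop above is the
   classic overlapping-copy rule.  When the destination register range
   starts above the source range, moving the highest word first keeps
   every source word intact until it has been read.  The same rule for
   plain memory (move_words is a hypothetical name):  */
#if 0
#include <stddef.h>

static void
move_words (long *dst, const long *src, size_t nregs)
{
  if (src < dst)		/* possible destructive overlap */
    for (size_t i = nregs; i-- > 0;)
      dst[i] = src[i];
  else
    for (size_t i = 0; i < nregs; i++)
      dst[i] = src[i];
}
#endif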
23886 /* This page contains routines that are used to determine what the
23887 function prologue and epilogue code will do and write them out. */
23889 static inline bool
23890 save_reg_p (int r)
23892 return !call_used_regs[r] && df_regs_ever_live_p (r);
23895 /* Determine whether the gp REG is really used. */
23897 static bool
23898 rs6000_reg_live_or_pic_offset_p (int reg)
23900 /* We need to mark the PIC offset register live under the same conditions
23901 as those under which it is set up; otherwise it won't be saved before we clobber it. */
23903 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23905 if (TARGET_TOC && TARGET_MINIMAL_TOC
23906 && (crtl->calls_eh_return
23907 || df_regs_ever_live_p (reg)
23908 || get_pool_size ()))
23909 return true;
23911 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23912 && flag_pic)
23913 return true;
23916 /* If the function calls eh_return, treat all the registers that would
23917 otherwise be checked for liveness as used. */
23919 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
23920 && !call_used_regs[reg]);
23923 /* Return the first fixed-point register that is required to be
23924 saved. 32 if none. */
23926 static int
23927 first_reg_to_save (void)
23929 int first_reg;
23931 /* Find lowest numbered live register. */
23932 for (first_reg = 13; first_reg <= 31; first_reg++)
23933 if (save_reg_p (first_reg))
23934 break;
23936 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
23937 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
23938 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23939 || (TARGET_TOC && TARGET_MINIMAL_TOC))
23940 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23941 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
23943 #if TARGET_MACHO
23944 if (flag_pic
23945 && crtl->uses_pic_offset_table
23946 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23947 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23948 #endif
23950 return first_reg;
23953 /* Similar, for FP regs. */
23955 static int
23956 first_fp_reg_to_save (void)
23958 int first_reg;
23960 /* Find lowest numbered live register. */
23961 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23962 if (save_reg_p (first_reg))
23963 break;
23965 return first_reg;
23968 /* Similar, for AltiVec regs. */
23970 static int
23971 first_altivec_reg_to_save (void)
23973 int i;
23975 /* Stack frame remains as is unless we are in AltiVec ABI. */
23976 if (! TARGET_ALTIVEC_ABI)
23977 return LAST_ALTIVEC_REGNO + 1;
23979 /* On Darwin, the unwind routines are compiled without
23980 TARGET_ALTIVEC, and use save_world to save/restore the
23981 altivec registers when necessary. */
23982 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23983 && ! TARGET_ALTIVEC)
23984 return FIRST_ALTIVEC_REGNO + 20;
23986 /* Find lowest numbered live register. */
23987 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23988 if (save_reg_p (i))
23989 break;
23991 return i;
23994 /* Return a 32-bit mask of the AltiVec registers we need to set in
23995 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
23996 the 32-bit word is 0. */
23998 static unsigned int
23999 compute_vrsave_mask (void)
24001 unsigned int i, mask = 0;
24003 /* On Darwin, the unwind routines are compiled without
24004 TARGET_ALTIVEC, and use save_world to save/restore the
24005 call-saved altivec registers when necessary. */
24006 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24007 && ! TARGET_ALTIVEC)
24008 mask |= 0xFFF;
24010 /* First, find out if we use _any_ altivec registers. */
24011 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24012 if (df_regs_ever_live_p (i))
24013 mask |= ALTIVEC_REG_BIT (i);
24015 if (mask == 0)
24016 return mask;
24018 /* Next, remove the argument registers from the set. These must
24019 be in the VRSAVE mask set by the caller, so we don't need to add
24020 them in again. More importantly, the mask we compute here is
24021 used to generate CLOBBERs in the set_vrsave insn, and we do not
24022 wish the argument registers to die. */
24023 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24024 mask &= ~ALTIVEC_REG_BIT (i);
24026 /* Similarly, remove the return value from the set. */
24028 bool yes = false;
24029 diddle_return_value (is_altivec_return_reg, &yes);
24030 if (yes)
24031 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24034 return mask;
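/* Editorial sketch, not part of GCC: the VRSAVE mask is big-endian
   within the word, with V0 in the most significant bit as the comment
   above states.  Assuming ALTIVEC_REG_BIT amounts to a right shift of
   0x80000000, a mask could be built from a hypothetical liveness
   array like so:  */
#if 0
static unsigned int
vrsave_mask_from_liveness (const unsigned char live[32])
{
  unsigned int mask = 0;
  for (int vn = 0; vn < 32; vn++)
    if (live[vn])
      mask |= 0x80000000u >> vn;	/* bit for Vn; MSB is V0 */
  return mask;
}
#endif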
24037 /* For a very restricted set of circumstances, we can cut down the
24038 size of prologues/epilogues by calling our own save/restore-the-world
24039 routines. */
24041 static void
24042 compute_save_world_info (rs6000_stack_t *info)
24044 info->world_save_p = 1;
24045 info->world_save_p
24046 = (WORLD_SAVE_P (info)
24047 && DEFAULT_ABI == ABI_DARWIN
24048 && !cfun->has_nonlocal_label
24049 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24050 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24051 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24052 && info->cr_save_p);
24054 /* This will not work in conjunction with sibcalls. Make sure there
24055 are none. (This check is expensive, but seldom executed.) */
24056 if (WORLD_SAVE_P (info))
24058 rtx_insn *insn;
24059 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24060 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24062 info->world_save_p = 0;
24063 break;
24067 if (WORLD_SAVE_P (info))
24069 /* Even if we're not touching VRsave, make sure there's room on the
24070 stack for it, if it looks like we're calling SAVE_WORLD, which
24071 will attempt to save it. */
24072 info->vrsave_size = 4;
24074 /* If we are going to save the world, we need to save the link register too. */
24075 info->lr_save_p = 1;
24077 /* "Save" the VRsave register too if we're saving the world. */
24078 if (info->vrsave_mask == 0)
24079 info->vrsave_mask = compute_vrsave_mask ();
24081 /* Because the Darwin register save/restore routines only handle
24082 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24083 check. */
24084 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24085 && (info->first_altivec_reg_save
24086 >= FIRST_SAVED_ALTIVEC_REGNO));
24089 return;
24093 static void
24094 is_altivec_return_reg (rtx reg, void *xyes)
24096 bool *yes = (bool *) xyes;
24097 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24098 *yes = true;
24102 /* Return whether REG is a global user reg or has been specified by
24103 -ffixed-REG. We should not restore these, and so cannot use
24104 lmw or out-of-line restore functions if there are any. We also
24105 can't save them (well, emit frame notes for them), because frame
24106 unwinding during exception handling will restore saved registers. */
24108 static bool
24109 fixed_reg_p (int reg)
24111 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24112 backend sets it, overriding anything the user might have given. */
24113 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24114 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24115 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24116 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24117 return false;
24119 return fixed_regs[reg];
24122 /* Determine the strategy for saving/restoring registers. */
24124 enum {
24125 SAVE_MULTIPLE = 0x1,
24126 SAVE_INLINE_GPRS = 0x2,
24127 SAVE_INLINE_FPRS = 0x4,
24128 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24129 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24130 SAVE_INLINE_VRS = 0x20,
24131 REST_MULTIPLE = 0x100,
24132 REST_INLINE_GPRS = 0x200,
24133 REST_INLINE_FPRS = 0x400,
24134 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24135 REST_INLINE_VRS = 0x1000
24138 static int
24139 rs6000_savres_strategy (rs6000_stack_t *info,
24140 bool using_static_chain_p)
24142 int strategy = 0;
24144 /* Select between in-line and out-of-line save and restore of regs.
24145 First, all the obvious cases where we don't use out-of-line. */
24146 if (crtl->calls_eh_return
24147 || cfun->machine->ra_need_lr)
24148 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24149 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24150 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24152 if (info->first_gp_reg_save == 32)
24153 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24155 if (info->first_fp_reg_save == 64
24156 /* The out-of-line FP routines use double-precision stores;
24157 we can't use those routines if we don't have such stores. */
24158 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24159 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24161 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24162 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24164 /* Define cutoff for using out-of-line functions to save registers. */
24165 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24167 if (!optimize_size)
24169 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24170 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24171 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24173 else
24175 /* Prefer out-of-line restore if it will exit. */
24176 if (info->first_fp_reg_save > 61)
24177 strategy |= SAVE_INLINE_FPRS;
24178 if (info->first_gp_reg_save > 29)
24180 if (info->first_fp_reg_save == 64)
24181 strategy |= SAVE_INLINE_GPRS;
24182 else
24183 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24185 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24186 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24189 else if (DEFAULT_ABI == ABI_DARWIN)
24191 if (info->first_fp_reg_save > 60)
24192 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24193 if (info->first_gp_reg_save > 29)
24194 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24195 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24197 else
24199 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24200 if (info->first_fp_reg_save > 61)
24201 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24202 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24203 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24206 /* Don't bother to try to save things out-of-line if r11 is occupied
24207 by the static chain. It would require too much fiddling and the
24208 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24209 pointer on Darwin, and AIX uses r1 or r12. */
24210 if (using_static_chain_p
24211 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24212 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24213 | SAVE_INLINE_GPRS
24214 | SAVE_INLINE_VRS);
24216 /* Saving CR interferes with the exit routines used on the SPE, so
24217 just punt here. */
24218 if (TARGET_SPE_ABI
24219 && info->spe_64bit_regs_used
24220 && info->cr_save_p)
24221 strategy |= REST_INLINE_GPRS;
24223 /* We can only use the out-of-line routines to restore fprs if we've
24224 saved all the registers from first_fp_reg_save in the prologue.
24225 Otherwise, we risk loading garbage. Of course, if we have saved
24226 out-of-line then we know we haven't skipped any fprs. */
24227 if ((strategy & SAVE_INLINE_FPRS)
24228 && !(strategy & REST_INLINE_FPRS))
24230 int i;
24232 for (i = info->first_fp_reg_save; i < 64; i++)
24233 if (fixed_regs[i] || !save_reg_p (i))
24235 strategy |= REST_INLINE_FPRS;
24236 break;
24240 /* Similarly, for altivec regs. */
24241 if ((strategy & SAVE_INLINE_VRS)
24242 && !(strategy & REST_INLINE_VRS))
24244 int i;
24246 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24247 if (fixed_regs[i] || !save_reg_p (i))
24249 strategy |= REST_INLINE_VRS;
24250 break;
24254 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24255 saved is an out-of-line save or restore. Set up the value for
24256 the next test (excluding out-of-line gprs). */
24257 bool lr_save_p = (info->lr_save_p
24258 || !(strategy & SAVE_INLINE_FPRS)
24259 || !(strategy & SAVE_INLINE_VRS)
24260 || !(strategy & REST_INLINE_FPRS)
24261 || !(strategy & REST_INLINE_VRS));
24263 if (TARGET_MULTIPLE
24264 && !TARGET_POWERPC64
24265 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
24266 && info->first_gp_reg_save < 31)
24268 /* Prefer store multiple for saves over out-of-line routines,
24269 since the store-multiple instruction will always be smaller. */
24270 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24272 /* The situation is more complicated with load multiple. We'd
24273 prefer to use the out-of-line routines for restores, since the
24274 "exit" out-of-line routines can handle the restore of LR and the
24275 frame teardown. However it doesn't make sense to use the
24276 out-of-line routine if that is the only reason we'd need to save
24277 LR, and we can't use the "exit" out-of-line gpr restore if we
24278 have saved some fprs. In those cases it is advantageous to use
24279 load multiple when available. */
24280 if (info->first_fp_reg_save != 64 || !lr_save_p)
24281 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24284 /* Using the "exit" out-of-line routine does not improve code size
24285 if using it would require lr to be saved and if only saving one
24286 or two gprs. */
24287 else if (!lr_save_p && info->first_gp_reg_save > 29)
24288 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24290 /* We can only use load multiple or the out-of-line routines to
24291 restore gprs if we've saved all the registers from
24292 first_gp_reg_save. Otherwise, we risk loading garbage.
24293 Of course, if we have saved out-of-line or used stmw then we know
24294 we haven't skipped any gprs. */
24295 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24296 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24298 int i;
24300 for (i = info->first_gp_reg_save; i < 32; i++)
24301 if (fixed_reg_p (i) || !save_reg_p (i))
24303 strategy |= REST_INLINE_GPRS;
24304 strategy &= ~REST_MULTIPLE;
24305 break;
24309 if (TARGET_ELF && TARGET_64BIT)
24311 if (!(strategy & SAVE_INLINE_FPRS))
24312 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24313 else if (!(strategy & SAVE_INLINE_GPRS)
24314 && info->first_fp_reg_save == 64)
24315 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24317 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24318 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24320 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24321 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24323 return strategy;
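/* Editorial sketch, not part of GCC: the strategy word is a plain bit
   set, so each decision is read back with a mask.  A hypothetical
   consumer of the GPR-restore decision:  */
#if 0
static int
gpr_restore_is_out_of_line (int strategy)
{
  /* Out-of-line restore is what remains when neither inline moves
     (REST_INLINE_GPRS) nor lmw (REST_MULTIPLE) was selected.  */
  return (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) == 0;
}
#endif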
24326 /* Calculate the stack information for the current function. This is
24327 complicated by having two separate calling sequences, the AIX calling
24328 sequence and the V.4 calling sequence.
24330 AIX (and Darwin/Mac OS X) stack frames look like:
24331 32-bit 64-bit
24332 SP----> +---------------------------------------+
24333 | back chain to caller | 0 0
24334 +---------------------------------------+
24335 | saved CR | 4 8 (8-11)
24336 +---------------------------------------+
24337 | saved LR | 8 16
24338 +---------------------------------------+
24339 | reserved for compilers | 12 24
24340 +---------------------------------------+
24341 | reserved for binders | 16 32
24342 +---------------------------------------+
24343 | saved TOC pointer | 20 40
24344 +---------------------------------------+
24345 | Parameter save area (P) | 24 48
24346 +---------------------------------------+
24347 | Alloca space (A) | 24+P etc.
24348 +---------------------------------------+
24349 | Local variable space (L) | 24+P+A
24350 +---------------------------------------+
24351 | Float/int conversion temporary (X) | 24+P+A+L
24352 +---------------------------------------+
24353 | Save area for AltiVec registers (W) | 24+P+A+L+X
24354 +---------------------------------------+
24355 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24356 +---------------------------------------+
24357 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24358 +---------------------------------------+
24359 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24360 +---------------------------------------+
24361 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24362 +---------------------------------------+
24363 old SP->| back chain to caller's caller |
24364 +---------------------------------------+
24366 The required alignment for AIX configurations is two words (i.e., 8
24367 or 16 bytes).
24369 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24371 SP----> +---------------------------------------+
24372 | Back chain to caller | 0
24373 +---------------------------------------+
24374 | Save area for CR | 8
24375 +---------------------------------------+
24376 | Saved LR | 16
24377 +---------------------------------------+
24378 | Saved TOC pointer | 24
24379 +---------------------------------------+
24380 | Parameter save area (P) | 32
24381 +---------------------------------------+
24382 | Alloca space (A) | 32+P
24383 +---------------------------------------+
24384 | Local variable space (L) | 32+P+A
24385 +---------------------------------------+
24386 | Save area for AltiVec registers (W) | 32+P+A+L
24387 +---------------------------------------+
24388 | AltiVec alignment padding (Y) | 32+P+A+L+W
24389 +---------------------------------------+
24390 | Save area for GP registers (G) | 32+P+A+L+W+Y
24391 +---------------------------------------+
24392 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24393 +---------------------------------------+
24394 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24395 +---------------------------------------+
24398 V.4 stack frames look like:
24400 SP----> +---------------------------------------+
24401 | back chain to caller | 0
24402 +---------------------------------------+
24403 | caller's saved LR | 4
24404 +---------------------------------------+
24405 | Parameter save area (P) | 8
24406 +---------------------------------------+
24407 | Alloca space (A) | 8+P
24408 +---------------------------------------+
24409 | Varargs save area (V) | 8+P+A
24410 +---------------------------------------+
24411 | Local variable space (L) | 8+P+A+V
24412 +---------------------------------------+
24413 | Float/int conversion temporary (X) | 8+P+A+V+L
24414 +---------------------------------------+
24415 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24416 +---------------------------------------+
24417 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24418 +---------------------------------------+
24419 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24420 +---------------------------------------+
24421 | SPE: area for 64-bit GP registers |
24422 +---------------------------------------+
24423 | SPE alignment padding |
24424 +---------------------------------------+
24425 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24426 +---------------------------------------+
24427 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24428 +---------------------------------------+
24429 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24430 +---------------------------------------+
24431 old SP->| back chain to caller's caller |
24432 +---------------------------------------+
24434 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24435 given. (But note below and in sysv4.h that we require only 8 and
24436 may round up the size of our stack frame anyway. The historical
24437 reason is early versions of powerpc-linux which didn't properly
24438 align the stack at program startup. A happy side-effect is that
24439 -mno-eabi libraries can be used with -meabi programs.)
24441 The EABI configuration defaults to the V.4 layout. However,
24442 the stack alignment requirements may differ. If -mno-eabi is not
24443 given, the required stack alignment is 8 bytes; if -mno-eabi is
24444 given, the required alignment is 16 bytes. (But see V.4 comment
24445 above.) */
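/* Editorial sketch, not part of GCC: the offset columns in the frame
   diagrams above are running sums -- each area begins where the
   previous one ends.  For the 32-bit AIX layout, with hypothetical
   area sizes as inputs:  */
#if 0
#include <stdio.h>

static void
aix32_offsets (int P, int A, int L, int X, int W, int Y, int Z)
{
  const char *area[] = { "parm (P)", "alloca (A)", "locals (L)",
			 "fpconv (X)", "altivec (W)", "vpad (Y)",
			 "vrsave (Z)" };
  int size[] = { P, A, L, X, W, Y, Z };
  int off = 24;			/* fixed header: back chain .. TOC */
  for (int i = 0; i < 7; i++)
    {
      printf ("%-12s starts at %d\n", area[i], off);
      off += size[i];
    }
  printf ("GP save area (G) starts at %d\n", off);
}
#endif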
24447 #ifndef ABI_STACK_BOUNDARY
24448 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24449 #endif
24451 static rs6000_stack_t *
24452 rs6000_stack_info (void)
24454 /* We should never be called for thunks; we are not set up for that. */
24455 gcc_assert (!cfun->is_thunk);
24457 rs6000_stack_t *info = &stack_info;
24458 int reg_size = TARGET_32BIT ? 4 : 8;
24459 int ehrd_size;
24460 int ehcr_size;
24461 int save_align;
24462 int first_gp;
24463 HOST_WIDE_INT non_fixed_size;
24464 bool using_static_chain_p;
24466 if (reload_completed && info->reload_completed)
24467 return info;
24469 memset (info, 0, sizeof (*info));
24470 info->reload_completed = reload_completed;
24472 if (TARGET_SPE)
24474 /* Cache value so we don't rescan instruction chain over and over. */
24475 if (cfun->machine->spe_insn_chain_scanned_p == 0)
24476 cfun->machine->spe_insn_chain_scanned_p
24477 = spe_func_has_64bit_regs_p () + 1;
24478 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
24481 /* Select which calling sequence. */
24482 info->abi = DEFAULT_ABI;
24484 /* Calculate which registers need to be saved & save area size. */
24485 info->first_gp_reg_save = first_reg_to_save ();
24486 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24487 even if it currently looks like we won't. Reload may need it to
24488 get at a constant; if so, it will have already created a constant
24489 pool entry for it. */
24490 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24491 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24492 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24493 && crtl->uses_const_pool
24494 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24495 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24496 else
24497 first_gp = info->first_gp_reg_save;
24499 info->gp_size = reg_size * (32 - first_gp);
24501 /* For the SPE, we have an additional upper 32-bits on each GPR.
24502 Ideally we should save the entire 64-bits only when the upper
24503 half is used in SIMD instructions. Since we only record
24504 registers live (not the size they are used in), this proves
24505 difficult because we'd have to traverse the instruction chain at
24506 the right time, taking reload into account. This is a real pain,
24507 so we opt to save the GPRs in 64-bits always if even one register
24508 gets used in 64-bits. Otherwise, all the registers in the frame
24509 get saved in 32-bits.
24511 So, when we save all GPRs (except the SP) in 64-bits, the
24512 traditional GP save area will be empty. */
24513 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24514 info->gp_size = 0;
24516 info->first_fp_reg_save = first_fp_reg_to_save ();
24517 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24519 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24520 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24521 - info->first_altivec_reg_save);
24523 /* Does this function call anything? */
24524 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24526 /* Determine if we need to save the condition code registers. */
24527 if (save_reg_p (CR2_REGNO)
24528 || save_reg_p (CR3_REGNO)
24529 || save_reg_p (CR4_REGNO))
24531 info->cr_save_p = 1;
24532 if (DEFAULT_ABI == ABI_V4)
24533 info->cr_size = reg_size;
24536 /* If the current function calls __builtin_eh_return, then we need
24537 to allocate stack space for registers that will hold data for
24538 the exception handler. */
24539 if (crtl->calls_eh_return)
24541 unsigned int i;
24542 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24543 continue;
24545 /* SPE saves EH registers in 64-bits. */
24546 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
24547 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
24549 else
24550 ehrd_size = 0;
24552 /* In the ELFv2 ABI, we also need to allocate space for separate
24553 CR field save areas if the function calls __builtin_eh_return. */
24554 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24556 /* This hard-codes that we have three call-saved CR fields. */
24557 ehcr_size = 3 * reg_size;
24558 /* We do *not* use the regular CR save mechanism. */
24559 info->cr_save_p = 0;
24561 else
24562 ehcr_size = 0;
24564 /* Determine various sizes. */
24565 info->reg_size = reg_size;
24566 info->fixed_size = RS6000_SAVE_AREA;
24567 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24568 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24569 TARGET_ALTIVEC ? 16 : 8);
24570 if (FRAME_GROWS_DOWNWARD)
24571 info->vars_size
24572 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24573 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24574 - (info->fixed_size + info->vars_size + info->parm_size);
24576 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24577 info->spe_gp_size = 8 * (32 - first_gp);
24579 if (TARGET_ALTIVEC_ABI)
24580 info->vrsave_mask = compute_vrsave_mask ();
24582 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24583 info->vrsave_size = 4;
24585 compute_save_world_info (info);
24587 /* Calculate the offsets. */
24588 switch (DEFAULT_ABI)
24590 case ABI_NONE:
24591 default:
24592 gcc_unreachable ();
24594 case ABI_AIX:
24595 case ABI_ELFv2:
24596 case ABI_DARWIN:
24597 info->fp_save_offset = -info->fp_size;
24598 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24600 if (TARGET_ALTIVEC_ABI)
24602 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24604 /* Align stack so vector save area is on a quadword boundary.
24605 The padding goes above the vectors. */
24606 if (info->altivec_size != 0)
24607 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24609 info->altivec_save_offset = info->vrsave_save_offset
24610 - info->altivec_padding_size
24611 - info->altivec_size;
24612 gcc_assert (info->altivec_size == 0
24613 || info->altivec_save_offset % 16 == 0);
24615 /* Adjust for AltiVec case. */
24616 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24618 else
24619 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24621 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24622 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24623 info->lr_save_offset = 2*reg_size;
24624 break;
24626 case ABI_V4:
24627 info->fp_save_offset = -info->fp_size;
24628 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24629 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24631 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24633 /* Align stack so SPE GPR save area is aligned on a
24634 double-word boundary. */
24635 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
24636 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
24637 else
24638 info->spe_padding_size = 0;
24640 info->spe_gp_save_offset = info->cr_save_offset
24641 - info->spe_padding_size
24642 - info->spe_gp_size;
24644 /* Adjust for SPE case. */
24645 info->ehrd_offset = info->spe_gp_save_offset;
24647 else if (TARGET_ALTIVEC_ABI)
24649 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24651 /* Align stack so vector save area is on a quadword boundary. */
24652 if (info->altivec_size != 0)
24653 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24655 info->altivec_save_offset = info->vrsave_save_offset
24656 - info->altivec_padding_size
24657 - info->altivec_size;
24659 /* Adjust for AltiVec case. */
24660 info->ehrd_offset = info->altivec_save_offset;
24662 else
24663 info->ehrd_offset = info->cr_save_offset;
24665 info->ehrd_offset -= ehrd_size;
24666 info->lr_save_offset = reg_size;
24669 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24670 info->save_size = RS6000_ALIGN (info->fp_size
24671 + info->gp_size
24672 + info->altivec_size
24673 + info->altivec_padding_size
24674 + info->spe_gp_size
24675 + info->spe_padding_size
24676 + ehrd_size
24677 + ehcr_size
24678 + info->cr_size
24679 + info->vrsave_size,
24680 save_align);
24682 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24684 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24685 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24687 /* Determine if we need to save the link register. */
24688 if (info->calls_p
24689 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24690 && crtl->profile
24691 && !TARGET_PROFILE_KERNEL)
24692 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24693 #ifdef TARGET_RELOCATABLE
24694 || (DEFAULT_ABI == ABI_V4
24695 && (TARGET_RELOCATABLE || flag_pic > 1)
24696 && get_pool_size () != 0)
24697 #endif
24698 || rs6000_ra_ever_killed ())
24699 info->lr_save_p = 1;
24701 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24702 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24703 && call_used_regs[STATIC_CHAIN_REGNUM]);
24704 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24706 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24707 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24708 || !(info->savres_strategy & SAVE_INLINE_VRS)
24709 || !(info->savres_strategy & REST_INLINE_GPRS)
24710 || !(info->savres_strategy & REST_INLINE_FPRS)
24711 || !(info->savres_strategy & REST_INLINE_VRS))
24712 info->lr_save_p = 1;
24714 if (info->lr_save_p)
24715 df_set_regs_ever_live (LR_REGNO, true);
24717 /* Determine if we need to allocate any stack frame:
24719 For AIX we need to push the stack if a frame pointer is needed
24720 (because the stack might be dynamically adjusted), if we are
24721 debugging, if we make calls, or if the sum of fp_save, gp_save,
24722 and local variables is more than the space needed to save all
24723 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24724 + 18*8 = 288 (GPR13 reserved).
24726 For V.4 we don't have the stack cushion that AIX uses, but assume
24727 that the debugger can handle stackless frames. */
24729 if (info->calls_p)
24730 info->push_p = 1;
24732 else if (DEFAULT_ABI == ABI_V4)
24733 info->push_p = non_fixed_size != 0;
24735 else if (frame_pointer_needed)
24736 info->push_p = 1;
24738 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24739 info->push_p = 1;
24741 else
24742 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24744 return info;
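/* Editorial sketch, not part of GCC: the RS6000_ALIGN rounding used
   throughout rs6000_stack_info is the usual round-up-to-a-power-of-two
   idiom, which under that assumption reduces to:  */
#if 0
static long
align_up (long n, long align)	/* ALIGN must be a power of two */
{
  return (n + align - 1) & ~(align - 1);
}
/* e.g. align_up (220, 16) == 224: a 220-byte frame is padded out to
   the 16-byte boundary before being pushed.  */
#endif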
24747 /* Return true if the current function uses any GPRs in 64-bit SIMD
24748 mode. */
24750 static bool
24751 spe_func_has_64bit_regs_p (void)
24753 rtx_insn *insns, *insn;
24755 /* Functions that save and restore all the call-saved registers will
24756 need to save/restore the registers in 64-bits. */
24757 if (crtl->calls_eh_return
24758 || cfun->calls_setjmp
24759 || crtl->has_nonlocal_goto)
24760 return true;
24762 insns = get_insns ();
24764 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
24766 if (INSN_P (insn))
24768 rtx i;
24770 /* FIXME: This should be implemented with attributes...
24772 (set_attr "spe64" "true")....then,
24773 if (get_spe64(insn)) return true;
24775 It's the only reliable way to do the stuff below. */
24777 i = PATTERN (insn);
24778 if (GET_CODE (i) == SET)
24780 machine_mode mode = GET_MODE (SET_SRC (i));
24782 if (SPE_VECTOR_MODE (mode))
24783 return true;
24784 if (TARGET_E500_DOUBLE
24785 && (mode == DFmode || FLOAT128_2REG_P (mode)))
24786 return true;
24791 return false;
24794 static void
24795 debug_stack_info (rs6000_stack_t *info)
24797 const char *abi_string;
24799 if (! info)
24800 info = rs6000_stack_info ();
24802 fprintf (stderr, "\nStack information for function %s:\n",
24803 ((current_function_decl && DECL_NAME (current_function_decl))
24804 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24805 : "<unknown>"));
24807 switch (info->abi)
24809 default: abi_string = "Unknown"; break;
24810 case ABI_NONE: abi_string = "NONE"; break;
24811 case ABI_AIX: abi_string = "AIX"; break;
24812 case ABI_ELFv2: abi_string = "ELFv2"; break;
24813 case ABI_DARWIN: abi_string = "Darwin"; break;
24814 case ABI_V4: abi_string = "V.4"; break;
24817 fprintf (stderr, "\tABI = %5s\n", abi_string);
24819 if (TARGET_ALTIVEC_ABI)
24820 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24822 if (TARGET_SPE_ABI)
24823 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
24825 if (info->first_gp_reg_save != 32)
24826 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24828 if (info->first_fp_reg_save != 64)
24829 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24831 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24832 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24833 info->first_altivec_reg_save);
24835 if (info->lr_save_p)
24836 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24838 if (info->cr_save_p)
24839 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24841 if (info->vrsave_mask)
24842 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24844 if (info->push_p)
24845 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24847 if (info->calls_p)
24848 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24850 if (info->gp_size)
24851 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24853 if (info->fp_size)
24854 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24856 if (info->altivec_size)
24857 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24858 info->altivec_save_offset);
24860 if (info->spe_gp_size)
24861 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
24862 info->spe_gp_save_offset);
24864 if (info->vrsave_size)
24865 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24866 info->vrsave_save_offset);
24868 if (info->lr_save_p)
24869 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24871 if (info->cr_save_p)
24872 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24874 if (info->varargs_save_offset)
24875 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24877 if (info->total_size)
24878 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24879 info->total_size);
24881 if (info->vars_size)
24882 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24883 info->vars_size);
24885 if (info->parm_size)
24886 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24888 if (info->fixed_size)
24889 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24891 if (info->gp_size)
24892 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24894 if (info->spe_gp_size)
24895 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
24897 if (info->fp_size)
24898 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24900 if (info->altivec_size)
24901 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24903 if (info->vrsave_size)
24904 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24906 if (info->altivec_padding_size)
24907 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24908 info->altivec_padding_size);
24910 if (info->spe_padding_size)
24911 fprintf (stderr, "\tspe_padding_size = %5d\n",
24912 info->spe_padding_size);
24914 if (info->cr_size)
24915 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24917 if (info->save_size)
24918 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24920 if (info->reg_size != 4)
24921 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24923 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24925 fprintf (stderr, "\n");
24928 rtx
24929 rs6000_return_addr (int count, rtx frame)
24931 /* Currently we don't optimize very well between prolog and body
24932 code, and for PIC code the result can actually be quite bad, so
24933 don't try to be too clever here. */
24934 if (count != 0
24935 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24937 cfun->machine->ra_needs_full_frame = 1;
24939 return
24940 gen_rtx_MEM
24941 (Pmode,
24942 memory_address
24943 (Pmode,
24944 plus_constant (Pmode,
24945 copy_to_reg
24946 (gen_rtx_MEM (Pmode,
24947 memory_address (Pmode, frame))),
24948 RETURN_ADDRESS_OFFSET)));
24951 cfun->machine->ra_need_lr = 1;
24952 return get_hard_reg_initial_val (Pmode, LR_REGNO);
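/* Editorial sketch, not part of GCC: for COUNT != 0 the RTL built
   above performs one step of a back-chain walk -- load the caller's
   stack pointer through the back chain word, then read the saved LR
   slot at RETURN_ADDRESS_OFFSET.  A C rendering, with the offset as a
   hypothetical parameter:  */
#if 0
static void *
return_addr_one_level_up (void **frame, long ret_addr_offset)
{
  void **callers_sp = (void **) *frame;	/* back chain to caller */
  return *(void **) ((char *) callers_sp + ret_addr_offset);
}
#endif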
24955 /* Say whether a function is a candidate for sibcall handling or not. */
24957 static bool
24958 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24960 tree fntype;
24962 if (decl)
24963 fntype = TREE_TYPE (decl);
24964 else
24965 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24967 /* We can't do it if the called function has more vector parameters
24968 than the current function; there's nowhere to put the VRsave code. */
24969 if (TARGET_ALTIVEC_ABI
24970 && TARGET_ALTIVEC_VRSAVE
24971 && !(decl && decl == current_function_decl))
24973 function_args_iterator args_iter;
24974 tree type;
24975 int nvreg = 0;
24977 /* Functions with vector parameters are required to have a
24978 prototype, so the argument type info must be available
24979 here. */
24980 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24981 if (TREE_CODE (type) == VECTOR_TYPE
24982 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24983 nvreg++;
24985 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24986 if (TREE_CODE (type) == VECTOR_TYPE
24987 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24988 nvreg--;
24990 if (nvreg > 0)
24991 return false;
24994 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24995 functions, because the callee may have a different TOC pointer to
24996 the caller and there's no way to ensure we restore the TOC when
24997 we return. With the secure-plt SYSV ABI we can't make non-local
24998 calls when -fpic/PIC because the plt call stubs use r30. */
24999 if (DEFAULT_ABI == ABI_DARWIN
25000 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25001 && decl
25002 && !DECL_EXTERNAL (decl)
25003 && !DECL_WEAK (decl)
25004 && (*targetm.binds_local_p) (decl))
25005 || (DEFAULT_ABI == ABI_V4
25006 && (!TARGET_SECURE_PLT
25007 || !flag_pic
25008 || (decl
25009 && (*targetm.binds_local_p) (decl)))))
25011 tree attr_list = TYPE_ATTRIBUTES (fntype);
25013 if (!lookup_attribute ("longcall", attr_list)
25014 || lookup_attribute ("shortcall", attr_list))
25015 return true;
25018 return false;
25021 static int
25022 rs6000_ra_ever_killed (void)
25024 rtx_insn *top;
25025 rtx reg;
25026 rtx_insn *insn;
25028 if (cfun->is_thunk)
25029 return 0;
25031 if (cfun->machine->lr_save_state)
25032 return cfun->machine->lr_save_state - 1;
25034 /* regs_ever_live has LR marked as used if any sibcalls are present,
25035 but this should not force saving and restoring in the
25036 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25037 clobbers LR, so that is inappropriate. */
25039 /* Also, the prologue can generate a store into LR that
25040 doesn't really count, like this:
25042 move LR->R0
25043 bcl to set PIC register
25044 move LR->R31
25045 move R0->LR
25047 When we're called from the epilogue, we need to avoid counting
25048 this as a store. */
25050 push_topmost_sequence ();
25051 top = get_insns ();
25052 pop_topmost_sequence ();
25053 reg = gen_rtx_REG (Pmode, LR_REGNO);
25055 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25057 if (INSN_P (insn))
25059 if (CALL_P (insn))
25061 if (!SIBLING_CALL_P (insn))
25062 return 1;
25064 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25065 return 1;
25066 else if (set_of (reg, insn) != NULL_RTX
25067 && !prologue_epilogue_contains (insn))
25068 return 1;
25071 return 0;
25074 /* Emit instructions needed to load the TOC register.
25075 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25076 a constant pool; or for SVR4 -fpic. */
25078 void
25079 rs6000_emit_load_toc_table (int fromprolog)
25081 rtx dest;
25082 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25084 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25086 char buf[30];
25087 rtx lab, tmp1, tmp2, got;
25089 lab = gen_label_rtx ();
25090 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25091 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25092 if (flag_pic == 2)
25094 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25095 need_toc_init = 1;
25097 else
25098 got = rs6000_got_sym ();
25099 tmp1 = tmp2 = dest;
25100 if (!fromprolog)
25102 tmp1 = gen_reg_rtx (Pmode);
25103 tmp2 = gen_reg_rtx (Pmode);
25105 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25106 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25107 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25108 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25110 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25112 emit_insn (gen_load_toc_v4_pic_si ());
25113 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25115 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25117 char buf[30];
25118 rtx temp0 = (fromprolog
25119 ? gen_rtx_REG (Pmode, 0)
25120 : gen_reg_rtx (Pmode));
25122 if (fromprolog)
25124 rtx symF, symL;
25126 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25127 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25129 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25130 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25132 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25133 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25134 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25136 else
25138 rtx tocsym, lab;
25140 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25141 need_toc_init = 1;
25142 lab = gen_label_rtx ();
25143 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25144 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25145 if (TARGET_LINK_STACK)
25146 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25147 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25149 emit_insn (gen_addsi3 (dest, temp0, dest));
25151 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25153 /* This is for AIX code running in non-PIC ELF32. */
25154 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25156 need_toc_init = 1;
25157 emit_insn (gen_elf_high (dest, realsym));
25158 emit_insn (gen_elf_low (dest, dest, realsym));
25160 else
25162 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25164 if (TARGET_32BIT)
25165 emit_insn (gen_load_toc_aix_si (dest));
25166 else
25167 emit_insn (gen_load_toc_aix_di (dest));
25171 /* Emit instructions to restore the link register after determining where
25172 its value has been stored. */
25174 void
25175 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25177 rs6000_stack_t *info = rs6000_stack_info ();
25178 rtx operands[2];
25180 operands[0] = source;
25181 operands[1] = scratch;
25183 if (info->lr_save_p)
25185 rtx frame_rtx = stack_pointer_rtx;
25186 HOST_WIDE_INT sp_offset = 0;
25187 rtx tmp;
25189 if (frame_pointer_needed
25190 || cfun->calls_alloca
25191 || info->total_size > 32767)
25193 tmp = gen_frame_mem (Pmode, frame_rtx);
25194 emit_move_insn (operands[1], tmp);
25195 frame_rtx = operands[1];
25197 else if (info->push_p)
25198 sp_offset = info->total_size;
25200 tmp = plus_constant (Pmode, frame_rtx,
25201 info->lr_save_offset + sp_offset);
25202 tmp = gen_frame_mem (Pmode, tmp);
25203 emit_move_insn (tmp, operands[0]);
25205 else
25206 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25208 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25209 state of lr_save_p so any change from here on would be a bug. In
25210 particular, stop rs6000_ra_ever_killed from considering the SET
25211 of lr we may have added just above. */
25212 cfun->machine->lr_save_state = info->lr_save_p + 1;
25215 static GTY(()) alias_set_type set = -1;
25217 alias_set_type
25218 get_TOC_alias_set (void)
25220 if (set == -1)
25221 set = new_alias_set ();
25222 return set;
25225 /* This returns nonzero if the current function uses the TOC. This is
25226 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25227 is generated by the ABI_V4 load_toc_* patterns. */
25228 #if TARGET_ELF
25229 static int
25230 uses_TOC (void)
25232 rtx_insn *insn;
25234 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25235 if (INSN_P (insn))
25237 rtx pat = PATTERN (insn);
25238 int i;
25240 if (GET_CODE (pat) == PARALLEL)
25241 for (i = 0; i < XVECLEN (pat, 0); i++)
25243 rtx sub = XVECEXP (pat, 0, i);
25244 if (GET_CODE (sub) == USE)
25246 sub = XEXP (sub, 0);
25247 if (GET_CODE (sub) == UNSPEC
25248 && XINT (sub, 1) == UNSPEC_TOC)
25249 return 1;
25253 return 0;
25255 #endif
25257 rtx
25258 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25260 rtx tocrel, tocreg, hi;
25262 if (TARGET_DEBUG_ADDR)
25264 if (GET_CODE (symbol) == SYMBOL_REF)
25265 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25266 XSTR (symbol, 0));
25267 else
25269 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25270 GET_RTX_NAME (GET_CODE (symbol)));
25271 debug_rtx (symbol);
25275 if (!can_create_pseudo_p ())
25276 df_set_regs_ever_live (TOC_REGISTER, true);
25278 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25279 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25280 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25281 return tocrel;
25283 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25284 if (largetoc_reg != NULL)
25286 emit_move_insn (largetoc_reg, hi);
25287 hi = largetoc_reg;
25289 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
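/* Editorial sketch, not part of GCC: a HIGH/LO_SUM pair rebuilds a
   32-bit offset from two 16-bit halves.  Since the low half is used as
   a signed immediate, the high half must be adjusted so the two parts
   sum back exactly -- the familiar "@ha"/"@l" relocation arithmetic,
   here as plain integer math:  */
#if 0
#include <stdint.h>

static void
split_ha_lo (int32_t offset, int32_t *ha, int32_t *lo)
{
  *lo = (int16_t) offset;	/* sign-extended low 16 bits */
  *ha = (int32_t) (((int64_t) offset - *lo) >> 16);
  /* Invariant: ((int64_t) *ha << 16) + *lo == offset.  */
}
#endif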
25292 /* Issue assembly directives that create a reference to the given DWARF
25293 FRAME_TABLE_LABEL from the current function section. */
25294 void
25295 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25297 fprintf (asm_out_file, "\t.ref %s\n",
25298 (* targetm.strip_name_encoding) (frame_table_label));
25301 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25302 and the change to the stack pointer. */
25304 static void
25305 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25307 rtvec p;
25308 int i;
25309 rtx regs[3];
25311 i = 0;
25312 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25313 if (hard_frame_needed)
25314 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25315 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25316 || (hard_frame_needed
25317 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25318 regs[i++] = fp;
25320 p = rtvec_alloc (i);
25321 while (--i >= 0)
25323 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25324 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25327 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25330 /* Emit the correct code for allocating stack space, as insns.
25331 If COPY_REG, make sure a copy of the old stack pointer is left in it.
25332 The generated code may use hard register 0 as a temporary. */
25334 static rtx_insn *
25335 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25337 rtx_insn *insn;
25338 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25339 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25340 rtx todec = gen_int_mode (-size, Pmode);
25341 rtx par, set, mem;
25343 if (INTVAL (todec) != -size)
25345 warning (0, "stack frame too large");
25346 emit_insn (gen_trap ());
25347 return 0;
25350 if (crtl->limit_stack)
25352 if (REG_P (stack_limit_rtx)
25353 && REGNO (stack_limit_rtx) > 1
25354 && REGNO (stack_limit_rtx) <= 31)
25356 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
25357 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25358 const0_rtx));
25360 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25361 && TARGET_32BIT
25362 && DEFAULT_ABI == ABI_V4)
25364 rtx toload = gen_rtx_CONST (VOIDmode,
25365 gen_rtx_PLUS (Pmode,
25366 stack_limit_rtx,
25367 GEN_INT (size)));
25369 emit_insn (gen_elf_high (tmp_reg, toload));
25370 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25371 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25372 const0_rtx));
25374 else
25375 warning (0, "stack limit expression is not supported");
25378 if (copy_reg)
25380 if (copy_off != 0)
25381 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25382 else
25383 emit_move_insn (copy_reg, stack_reg);
25386 if (size > 32767)
25388 /* Need a note here so that try_split doesn't get confused. */
25389 if (get_last_insn () == NULL_RTX)
25390 emit_note (NOTE_INSN_DELETED);
25391 insn = emit_move_insn (tmp_reg, todec);
25392 try_split (PATTERN (insn), insn, 0);
25393 todec = tmp_reg;
25396 insn = emit_insn (TARGET_32BIT
25397 ? gen_movsi_update_stack (stack_reg, stack_reg,
25398 todec, stack_reg)
25399 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25400 todec, stack_reg));
25401 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25402 it now and set the alias set/attributes. The above gen_*_update
25403 calls will generate a PARALLEL with the MEM set being the first
25404 operation. */
25405 par = PATTERN (insn);
25406 gcc_assert (GET_CODE (par) == PARALLEL);
25407 set = XVECEXP (par, 0, 0);
25408 gcc_assert (GET_CODE (set) == SET);
25409 mem = SET_DEST (set);
25410 gcc_assert (MEM_P (mem));
25411 MEM_NOTRAP_P (mem) = 1;
25412 set_mem_alias_set (mem, get_frame_alias_set ());
25414 RTX_FRAME_RELATED_P (insn) = 1;
25415 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25416 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25417 GEN_INT (-size))));
25418 return insn;
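/* Editorial sketch, not part of GCC: the 32767 test above exists
   because the store-with-update instructions take a signed 16-bit
   displacement.  A decrement that fits is encoded directly; a larger
   one is first materialized in a scratch register (the tmp_reg path).
   The predicate, spelled out:  */
#if 0
static int
fits_si16 (long v)
{
  return v >= -32768 && v <= 32767;
}
/* allocate: if (fits_si16 (-size)) emit "stwu r1,-size(r1)";
   otherwise load -size into a register and emit "stwux r1,r1,rN".  */
#endif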
25421 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25423 #if PROBE_INTERVAL > 32768
25424 #error Cannot use indexed addressing mode for stack probing
25425 #endif
25427 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25428 inclusive. These are offsets from the current stack pointer. */
25430 static void
25431 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25433 /* See if we have a constant small number of probes to generate. If so,
25434 that's the easy case. */
25435 if (first + size <= 32768)
25437 HOST_WIDE_INT i;
25439 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25440 N * PROBE_INTERVAL exceeds SIZE. If only one probe is needed, this
25441 loop generates no code. Then probe at FIRST + SIZE. */
25442 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25443 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25444 -(first + i)));
25446 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25447 -(first + size)));
25450 /* Otherwise, do the same as above, but in a loop. Note that we must be
25451 extra careful with variables wrapping around because we might be at
25452 the very top (or the very bottom) of the address space and we have
25453 to be able to handle this case properly; in particular, we use an
25454 equality test for the loop condition. */
25455 else
25457 HOST_WIDE_INT rounded_size;
25458 rtx r12 = gen_rtx_REG (Pmode, 12);
25459 rtx r0 = gen_rtx_REG (Pmode, 0);
25461 /* Sanity check for the addressing mode we're going to use. */
25462 gcc_assert (first <= 32768);
25464 /* Step 1: round SIZE to the previous multiple of the interval. */
25466 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25469 /* Step 2: compute initial and final value of the loop counter. */
25471 /* TEST_ADDR = SP + FIRST. */
25472 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25473 -first)));
25475 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25476 if (rounded_size > 32768)
25478 emit_move_insn (r0, GEN_INT (-rounded_size));
25479 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25481 else
25482 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25483 -rounded_size)));
25486 /* Step 3: the loop
25490 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25491 probe at TEST_ADDR
25493 while (TEST_ADDR != LAST_ADDR)
25495 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25496 until it is equal to ROUNDED_SIZE. */
25498 if (TARGET_64BIT)
25499 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25500 else
25501 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25504 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25505 that SIZE is equal to ROUNDED_SIZE. */
25507 if (size != rounded_size)
25508 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
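/* Editor's sketch, not part of GCC: the probe offsets the two cases
   above generate, assuming the default PROBE_INTERVAL of 4096 bytes.  */
#if 0
#include <stdio.h>
static void
show_probe_offsets (long first, long size)
{
  long i;
  /* One probe per interval...  */
  for (i = 4096; i < size; i += 4096)
    printf ("probe at sp-%ld\n", first + i);
  /* ...then a final probe at FIRST + SIZE.  */
  printf ("probe at sp-%ld\n", first + size);
}
#endif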
25512 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25513 absolute addresses. */
25515 const char *
25516 output_probe_stack_range (rtx reg1, rtx reg2)
25518 static int labelno = 0;
25519 char loop_lab[32];
25520 rtx xops[2];
25522 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25524 /* Loop. */
25525 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25527 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25528 xops[0] = reg1;
25529 xops[1] = GEN_INT (-PROBE_INTERVAL);
25530 output_asm_insn ("addi %0,%0,%1", xops);
25532 /* Probe at TEST_ADDR. */
25533 xops[1] = gen_rtx_REG (Pmode, 0);
25534 output_asm_insn ("stw %1,0(%0)", xops);
25536 /* Test if TEST_ADDR == LAST_ADDR. */
25537 xops[1] = reg2;
25538 if (TARGET_64BIT)
25539 output_asm_insn ("cmpd 0,%0,%1", xops);
25540 else
25541 output_asm_insn ("cmpw 0,%0,%1", xops);
25543 /* Branch. */
25544 fputs ("\tbne 0,", asm_out_file);
25545 assemble_name_raw (asm_out_file, loop_lab);
25546 fputc ('\n', asm_out_file);
25548 return "";
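/* Editor's illustration (register assignment and PROBE_INTERVAL == 4096
   assumed): the 32-bit loop emitted above looks roughly like

	.LPSRL0:
		addi 9,9,-4096
		stw 0,0(9)
		cmpw 0,9,10
		bne 0,.LPSRL0

   with r9 as TEST_ADDR and r10 as LAST_ADDR.  */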
25551 /* Add to INSN a note which is PATTERN (INSN) but with REG replaced
25552 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25553 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25554 deduce these equivalences by itself so it wasn't necessary to hold
25555 its hand so much. Don't be tempted to always supply d2_f_d_e with
25556 the actual cfa register, i.e. r31 when we are using a hard frame
25557 pointer. That fails when saving regs off r1, and sched moves the
25558 r31 setup past the reg saves. */
25560 static rtx
25561 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
25562 rtx reg2, rtx repl2)
25564 rtx repl;
25566 if (REGNO (reg) == STACK_POINTER_REGNUM)
25568 gcc_checking_assert (val == 0);
25569 repl = NULL_RTX;
25571 else
25572 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25573 GEN_INT (val));
25575 rtx pat = PATTERN (insn);
25576 if (!repl && !reg2)
25578 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25579 if (GET_CODE (pat) == PARALLEL)
25580 for (int i = 0; i < XVECLEN (pat, 0); i++)
25581 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25583 rtx set = XVECEXP (pat, 0, i);
25585 /* If this PARALLEL has been emitted for out-of-line
25586 register save functions, or store multiple, then omit
25587 eh_frame info for any user-defined global regs. If
25588 eh_frame info is supplied, frame unwinding will
25589 restore a user reg. */
25590 if (!REG_P (SET_SRC (set))
25591 || !fixed_reg_p (REGNO (SET_SRC (set))))
25592 RTX_FRAME_RELATED_P (set) = 1;
25594 RTX_FRAME_RELATED_P (insn) = 1;
25595 return insn;
25598 /* We expect that 'pat' is either a SET or a PARALLEL containing
25599 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25600 are important so they all have to be marked RTX_FRAME_RELATED_P.
25601 Call simplify_replace_rtx on the SETs rather than the whole insn
25602 so as to leave the other stuff alone (for example USE of r12). */
25604 if (GET_CODE (pat) == SET)
25606 if (repl)
25607 pat = simplify_replace_rtx (pat, reg, repl);
25608 if (reg2)
25609 pat = simplify_replace_rtx (pat, reg2, repl2);
25611 else if (GET_CODE (pat) == PARALLEL)
25613 pat = shallow_copy_rtx (pat);
25614 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25616 for (int i = 0; i < XVECLEN (pat, 0); i++)
25617 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25619 rtx set = XVECEXP (pat, 0, i);
25621 if (repl)
25622 set = simplify_replace_rtx (set, reg, repl);
25623 if (reg2)
25624 set = simplify_replace_rtx (set, reg2, repl2);
25625 XVECEXP (pat, 0, i) = set;
25627 /* Omit eh_frame info for any user-defined global regs. */
25628 if (!REG_P (SET_SRC (set))
25629 || !fixed_reg_p (REGNO (SET_SRC (set))))
25630 RTX_FRAME_RELATED_P (set) = 1;
25633 else
25634 gcc_unreachable ();
25636 RTX_FRAME_RELATED_P (insn) = 1;
25637 if (repl || reg2)
25638 add_reg_note (insn, REG_FRAME_RELATED_EXPR, pat);
25640 return insn;
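/* Editor's example (offsets hypothetical): if INSN stores a register
   at mem[r11 + 16] while r11 holds sp + 32, calling this function with
   REG == r11 and VAL == 32 attaches a REG_FRAME_RELATED_EXPR note whose
   store address simplifies to (plus (reg 1) (const_int 48)), so the
   unwinder records the save relative to the stack pointer.  */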
25643 /* Returns an insn that has a vrsave set operation with the
25644 appropriate CLOBBERs. */
25646 static rtx
25647 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25649 int nclobs, i;
25650 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25651 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25653 clobs[0]
25654 = gen_rtx_SET (vrsave,
25655 gen_rtx_UNSPEC_VOLATILE (SImode,
25656 gen_rtvec (2, reg, vrsave),
25657 UNSPECV_SET_VRSAVE));
25659 nclobs = 1;
25661 /* We need to clobber the registers in the mask so the scheduler
25662 does not move sets to VRSAVE before sets of AltiVec registers.
25664 However, if the function receives nonlocal gotos, reload will set
25665 all call saved registers live. We will end up with:
25667 (set (reg 999) (mem))
25668 (parallel [ (set (reg vrsave) (unspec blah))
25669 (clobber (reg 999))])
25671 The clobber will cause the store into reg 999 to be dead, and
25672 flow will attempt to delete an epilogue insn. In this case, we
25673 need an unspec use/set of the register. */
25675 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25676 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25678 if (!epiloguep || call_used_regs [i])
25679 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25680 gen_rtx_REG (V4SImode, i));
25681 else
25683 rtx reg = gen_rtx_REG (V4SImode, i);
25685 clobs[nclobs++]
25686 = gen_rtx_SET (reg,
25687 gen_rtx_UNSPEC (V4SImode,
25688 gen_rtvec (1, reg), 27));
25692 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25694 for (i = 0; i < nclobs; ++i)
25695 XVECEXP (insn, 0, i) = clobs[i];
25697 return insn;
25700 static rtx
25701 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25703 rtx addr, mem;
25705 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25706 mem = gen_frame_mem (GET_MODE (reg), addr);
25707 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25710 static rtx
25711 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25713 return gen_frame_set (reg, frame_reg, offset, false);
25716 static rtx
25717 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25719 return gen_frame_set (reg, frame_reg, offset, true);
25722 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25723 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25725 static rtx
25726 emit_frame_save (rtx frame_reg, machine_mode mode,
25727 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25729 rtx reg, insn;
25731 /* Some cases that need register indexed addressing. */
25732 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25733 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
25734 || (TARGET_E500_DOUBLE && mode == DFmode)
25735 || (TARGET_SPE_ABI
25736 && SPE_VECTOR_MODE (mode)
25737 && !SPE_CONST_OFFSET_OK (offset))));
25739 reg = gen_rtx_REG (mode, regno);
25740 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25741 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25742 NULL_RTX, NULL_RTX);
25745 /* Return an offset memory reference suitable for a frame store,
25746 converting the offset to a valid addressing mode if necessary. */
25748 static rtx
25749 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25751 rtx int_rtx, offset_rtx;
25753 int_rtx = GEN_INT (offset);
25755 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
25756 || (TARGET_E500_DOUBLE && mode == DFmode))
25758 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
25759 emit_move_insn (offset_rtx, int_rtx);
25761 else
25762 offset_rtx = int_rtx;
25764 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
25767 #ifndef TARGET_FIX_AND_CONTINUE
25768 #define TARGET_FIX_AND_CONTINUE 0
25769 #endif
25771 /* The first register saved out-of-line is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest of these. */
25772 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25773 #define LAST_SAVRES_REGISTER 31
25774 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25776 enum {
25777 SAVRES_LR = 0x1,
25778 SAVRES_SAVE = 0x2,
25779 SAVRES_REG = 0x0c,
25780 SAVRES_GPR = 0,
25781 SAVRES_FPR = 4,
25782 SAVRES_VR = 8
25785 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25787 /* Temporary holding space for an out-of-line register save/restore
25788 routine name. */
25789 static char savres_routine_name[30];
25791 /* Return the name for an out-of-line register save/restore routine.
25792 SEL encodes the register class, save vs. restore, and LR handling. */
25794 static char *
25795 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
25797 const char *prefix = "";
25798 const char *suffix = "";
25800 /* Different targets are supposed to define
25801 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25802 routine name could be defined with:
25804 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25806 This is a nice idea in principle, but in reality things are
25807 complicated in several ways:
25809 - ELF targets have save/restore routines for GPRs.
25811 - SPE targets use different prefixes for 32/64-bit registers, and
25812 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
25814 - PPC64 ELF targets have routines for save/restore of GPRs that
25815 differ in what they do with the link register, so having a set
25816 prefix doesn't work. (We only use one of the save routines at
25817 the moment, though.)
25819 - PPC32 ELF targets have "exit" versions of the restore routines
25820 that restore the link register and can save some extra space.
25821 These require an extra suffix. (There are also "tail" versions
25822 of the restore routines and "GOT" versions of the save routines,
25823 but we don't generate those at present. Same problems apply,
25824 though.)
25826 We deal with all this by synthesizing our own prefix/suffix and
25827 using that for the simple sprintf call shown above. */
25828 if (TARGET_SPE)
25830 /* No floating point saves on the SPE. */
25831 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
25833 if ((sel & SAVRES_SAVE))
25834 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
25835 else
25836 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
25838 if ((sel & SAVRES_LR))
25839 suffix = "_x";
25841 else if (DEFAULT_ABI == ABI_V4)
25843 if (TARGET_64BIT)
25844 goto aix_names;
25846 if ((sel & SAVRES_REG) == SAVRES_GPR)
25847 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25848 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25849 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25850 else if ((sel & SAVRES_REG) == SAVRES_VR)
25851 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25852 else
25853 abort ();
25855 if ((sel & SAVRES_LR))
25856 suffix = "_x";
25858 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25860 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25861 /* No out-of-line save/restore routines for GPRs on AIX. */
25862 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25863 #endif
25865 aix_names:
25866 if ((sel & SAVRES_REG) == SAVRES_GPR)
25867 prefix = ((sel & SAVRES_SAVE)
25868 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25869 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25870 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25872 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25873 if ((sel & SAVRES_LR))
25874 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25875 else
25876 #endif
25878 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25879 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25882 else if ((sel & SAVRES_REG) == SAVRES_VR)
25883 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25884 else
25885 abort ();
25888 if (DEFAULT_ABI == ABI_DARWIN)
25890 /* The Darwin approach is (slightly) different, in order to be
25891 compatible with code generated by the system toolchain. There is a
25892 single symbol for the start of the save sequence, and the code here
25893 embeds an offset into that code on the basis of the first register
25894 to be saved. */
25895 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25896 if ((sel & SAVRES_REG) == SAVRES_GPR)
25897 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25898 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25899 (regno - 13) * 4, prefix, regno);
25900 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25901 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25902 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25903 else if ((sel & SAVRES_REG) == SAVRES_VR)
25904 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25905 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25906 else
25907 abort ();
25909 else
25910 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25912 return savres_routine_name;
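/* Editor's sketch, not part of GCC: for the non-Darwin cases the
   synthesis above ends in the plain sprintf, producing names such as
   the following.  */
#if 0
#include <stdio.h>
static void
example_savres_names (void)
{
  char name[30];
  /* V.4 "exit" restore of r29-r31 that also restores LR: */
  sprintf (name, "%s%d%s", "_restgpr_", 29, "_x");
  puts (name);	/* _restgpr_29_x */
  /* ELFv2/AIX out-of-line GPR save variant that also saves LR: */
  sprintf (name, "%s%d%s", "_savegpr0_", 29, "");
  puts (name);	/* _savegpr0_29 */
}
#endif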
25915 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25916 SEL encodes the register class, save vs. restore, and LR handling. */
25918 static rtx
25919 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25921 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25922 ? info->first_gp_reg_save
25923 : (sel & SAVRES_REG) == SAVRES_FPR
25924 ? info->first_fp_reg_save - 32
25925 : (sel & SAVRES_REG) == SAVRES_VR
25926 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25927 : -1);
25928 rtx sym;
25929 int select = sel;
25931 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
25932 versions of the gpr routines. */
25933 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
25934 && info->spe_64bit_regs_used)
25935 select ^= SAVRES_FPR ^ SAVRES_GPR;
25937 /* Don't generate bogus routine names. */
25938 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25939 && regno <= LAST_SAVRES_REGISTER
25940 && select >= 0 && select <= 12);
25942 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25944 if (sym == NULL)
25946 char *name;
25948 name = rs6000_savres_routine_name (info, regno, sel);
25950 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25951 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25952 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25955 return sym;
25958 /* Emit a sequence of insns, including a stack tie if needed, for
25959 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25960 reset the stack pointer, but move the base of the frame into
25961 reg UPDT_REGNO for use by out-of-line register restore routines. */
25963 static rtx
25964 rs6000_emit_stack_reset (rs6000_stack_t *info,
25965 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25966 unsigned updt_regno)
25968 rtx updt_reg_rtx;
25970 /* This blockage is needed so that sched doesn't decide to move
25971 the sp change before the register restores. */
25972 if (DEFAULT_ABI == ABI_V4
25973 || (TARGET_SPE_ABI
25974 && info->spe_64bit_regs_used != 0
25975 && info->first_gp_reg_save != 32))
25976 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
25978 /* If we are restoring registers out-of-line, we will be using the
25979 "exit" variants of the restore routines, which will reset the
25980 stack for us. But we do need to point updt_reg into the
25981 right place for those routines. */
25982 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25984 if (frame_off != 0)
25985 return emit_insn (gen_add3_insn (updt_reg_rtx,
25986 frame_reg_rtx, GEN_INT (frame_off)));
25987 else if (REGNO (frame_reg_rtx) != updt_regno)
25988 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25990 return NULL_RTX;
25993 /* Return the register number used as a pointer by out-of-line
25994 save/restore functions. */
25996 static inline unsigned
25997 ptr_regno_for_savres (int sel)
25999 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26000 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26001 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
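/* Editor's summary of the above: AIX/ELFv2 use r1 for FPR routines or
   whenever LR is involved, and r12 otherwise; Darwin uses r1 for FPR
   routines and r11 otherwise; V.4 always uses r11.  */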
26004 /* Construct a parallel rtx describing the effect of a call to an
26005 out-of-line register save/restore routine, and emit the insn
26006 or jump_insn as appropriate. */
26008 static rtx
26009 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26010 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26011 machine_mode reg_mode, int sel)
26013 int i;
26014 int offset, start_reg, end_reg, n_regs, use_reg;
26015 int reg_size = GET_MODE_SIZE (reg_mode);
26016 rtx sym;
26017 rtvec p;
26018 rtx par, insn;
26020 offset = 0;
26021 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26022 ? info->first_gp_reg_save
26023 : (sel & SAVRES_REG) == SAVRES_FPR
26024 ? info->first_fp_reg_save
26025 : (sel & SAVRES_REG) == SAVRES_VR
26026 ? info->first_altivec_reg_save
26027 : -1);
26028 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26029 ? 32
26030 : (sel & SAVRES_REG) == SAVRES_FPR
26031 ? 64
26032 : (sel & SAVRES_REG) == SAVRES_VR
26033 ? LAST_ALTIVEC_REGNO + 1
26034 : -1);
26035 n_regs = end_reg - start_reg;
26036 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26037 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26038 + n_regs);
26040 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26041 RTVEC_ELT (p, offset++) = ret_rtx;
26043 RTVEC_ELT (p, offset++)
26044 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26046 sym = rs6000_savres_routine_sym (info, sel);
26047 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26049 use_reg = ptr_regno_for_savres (sel);
26050 if ((sel & SAVRES_REG) == SAVRES_VR)
26052 /* Vector regs are saved/restored using [reg+reg] addressing. */
26053 RTVEC_ELT (p, offset++)
26054 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26055 RTVEC_ELT (p, offset++)
26056 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26058 else
26059 RTVEC_ELT (p, offset++)
26060 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26062 for (i = 0; i < end_reg - start_reg; i++)
26063 RTVEC_ELT (p, i + offset)
26064 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26065 frame_reg_rtx, save_area_offset + reg_size * i,
26066 (sel & SAVRES_SAVE) != 0);
26068 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26069 RTVEC_ELT (p, i + offset)
26070 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26072 par = gen_rtx_PARALLEL (VOIDmode, p);
26074 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26076 insn = emit_jump_insn (par);
26077 JUMP_LABEL (insn) = ret_rtx;
26079 else
26080 insn = emit_insn (par);
26081 return insn;
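/* Editor's illustration (register numbers hypothetical): for an
   out-of-line GPR save starting at r29 that also saves LR, the
   PARALLEL built above is roughly

	(parallel [(clobber (reg:P LR))
		   (use (symbol_ref "_savegpr0_29"))
		   (use (reg:P 1))
		   (set (mem ...) (reg 29))
		   (set (mem ...) (reg 30))
		   (set (mem ...) (reg 31))
		   (set (mem ...) (reg 0))])

   with the final set present only for the LR-saving variants.  */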
26084 /* Emit code to copy into REG the CR fields that need to be saved. */
26086 static void
26087 rs6000_emit_move_from_cr (rtx reg)
26089 /* Only the ELFv2 ABI lets us save just the CR fields we need. */
26090 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26092 int i, cr_reg[8], count = 0;
26094 /* Collect CR fields that must be saved. */
26095 for (i = 0; i < 8; i++)
26096 if (save_reg_p (CR0_REGNO + i))
26097 cr_reg[count++] = i;
26099 /* If it's just a single one, use mfcrf. */
26100 if (count == 1)
26102 rtvec p = rtvec_alloc (1);
26103 rtvec r = rtvec_alloc (2);
26104 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26105 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26106 RTVEC_ELT (p, 0)
26107 = gen_rtx_SET (reg,
26108 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26110 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26111 return;
26114 /* ??? It might be better to handle the count == 2 and count == 3
26115 cases here as well, using logical operations to combine the values. */
26118 emit_insn (gen_movesi_from_cr (reg));
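/* Editor's note: the single-field mask above is 1 << (7 - field), so
   e.g. saving only CR2 yields an FXM value of 1 << 5 == 0x20 and an
   insn of the form "mfcrf Rx,0x20"; the fallback path below reads the
   whole condition register with a full "mfcr".  */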
26121 /* Return whether the split-stack arg pointer (r12) is used. */
26123 static bool
26124 split_stack_arg_pointer_used_p (void)
26126 /* If the pseudo holding the arg pointer is no longer a pseudo,
26127 then the arg pointer is used. */
26128 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26129 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26130 || (REGNO (cfun->machine->split_stack_arg_pointer)
26131 < FIRST_PSEUDO_REGISTER)))
26132 return true;
26134 /* Unfortunately we also need to do some code scanning, since
26135 r12 may have been substituted for the pseudo. */
26136 rtx_insn *insn;
26137 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26138 FOR_BB_INSNS (bb, insn)
26139 if (NONDEBUG_INSN_P (insn))
26141 /* A call destroys r12. */
26142 if (CALL_P (insn))
26143 return false;
26145 df_ref use;
26146 FOR_EACH_INSN_USE (use, insn)
26148 rtx x = DF_REF_REG (use);
26149 if (REG_P (x) && REGNO (x) == 12)
26150 return true;
26152 df_ref def;
26153 FOR_EACH_INSN_DEF (def, insn)
26155 rtx x = DF_REF_REG (def);
26156 if (REG_P (x) && REGNO (x) == 12)
26157 return false;
26160 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26163 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26165 static bool
26166 rs6000_global_entry_point_needed_p (void)
26168 /* Only needed for the ELFv2 ABI. */
26169 if (DEFAULT_ABI != ABI_ELFv2)
26170 return false;
26172 /* With -msingle-pic-base, we assume the whole program shares the same
26173 TOC, so no global entry point prologues are needed anywhere. */
26174 if (TARGET_SINGLE_PIC_BASE)
26175 return false;
26177 /* Ensure we have a global entry point for thunks. ??? We could
26178 avoid that if the target routine doesn't need a global entry point,
26179 but we do not know whether this is the case at this point. */
26180 if (cfun->is_thunk)
26181 return true;
26183 /* For regular functions, rs6000_emit_prologue sets this flag if the
26184 routine ever uses the TOC pointer. */
26185 return cfun->machine->r2_setup_needed;
26188 /* Emit function prologue as insns. */
26190 void
26191 rs6000_emit_prologue (void)
26193 rs6000_stack_t *info = rs6000_stack_info ();
26194 machine_mode reg_mode = Pmode;
26195 int reg_size = TARGET_32BIT ? 4 : 8;
26196 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26197 rtx frame_reg_rtx = sp_reg_rtx;
26198 unsigned int cr_save_regno;
26199 rtx cr_save_rtx = NULL_RTX;
26200 rtx insn;
26201 int strategy;
26202 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26203 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26204 && call_used_regs[STATIC_CHAIN_REGNUM]);
26205 int using_split_stack = (flag_split_stack
26206 && (lookup_attribute ("no_split_stack",
26207 DECL_ATTRIBUTES (cfun->decl))
26208 == NULL));
26210 /* Offset to top of frame for frame_reg and sp respectively. */
26211 HOST_WIDE_INT frame_off = 0;
26212 HOST_WIDE_INT sp_off = 0;
26213 /* sp_adjust is the stack adjusting instruction, tracked so that the
26214 insn setting up the split-stack arg pointer can be emitted just
26215 prior to it, when r12 is not used here for other purposes. */
26216 rtx_insn *sp_adjust = 0;
26218 #if CHECKING_P
26219 /* Track and check usage of r0, r11, r12. */
26220 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26221 #define START_USE(R) do \
26223 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26224 reg_inuse |= 1 << (R); \
26225 } while (0)
26226 #define END_USE(R) do \
26228 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26229 reg_inuse &= ~(1 << (R)); \
26230 } while (0)
26231 #define NOT_INUSE(R) do \
26233 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26234 } while (0)
26235 #else
26236 #define START_USE(R) do {} while (0)
26237 #define END_USE(R) do {} while (0)
26238 #define NOT_INUSE(R) do {} while (0)
26239 #endif
26241 if (DEFAULT_ABI == ABI_ELFv2
26242 && !TARGET_SINGLE_PIC_BASE)
26244 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26246 /* With -mminimal-toc we may generate an extra use of r2 below. */
26247 if (TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
26248 cfun->machine->r2_setup_needed = true;
26252 if (flag_stack_usage_info)
26253 current_function_static_stack_size = info->total_size;
26255 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26257 HOST_WIDE_INT size = info->total_size;
26259 if (crtl->is_leaf && !cfun->calls_alloca)
26261 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26262 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26263 size - STACK_CHECK_PROTECT);
26265 else if (size > 0)
26266 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26269 if (TARGET_FIX_AND_CONTINUE)
26271 /* gdb on Darwin arranges to forward a function from the old
26272 address by modifying the first 5 instructions of the function
26273 to branch to the overriding function. This is necessary to
26274 permit function pointers that point to the old function to
26275 actually forward to the new function. */
26276 emit_insn (gen_nop ());
26277 emit_insn (gen_nop ());
26278 emit_insn (gen_nop ());
26279 emit_insn (gen_nop ());
26280 emit_insn (gen_nop ());
26283 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26285 reg_mode = V2SImode;
26286 reg_size = 8;
26289 /* Handle world saves specially here. */
26290 if (WORLD_SAVE_P (info))
26292 int i, j, sz;
26293 rtx treg;
26294 rtvec p;
26295 rtx reg0;
26297 /* save_world expects lr in r0. */
26298 reg0 = gen_rtx_REG (Pmode, 0);
26299 if (info->lr_save_p)
26301 insn = emit_move_insn (reg0,
26302 gen_rtx_REG (Pmode, LR_REGNO));
26303 RTX_FRAME_RELATED_P (insn) = 1;
26306 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26307 assumptions about the offsets of various bits of the stack
26308 frame. */
26309 gcc_assert (info->gp_save_offset == -220
26310 && info->fp_save_offset == -144
26311 && info->lr_save_offset == 8
26312 && info->cr_save_offset == 4
26313 && info->push_p
26314 && info->lr_save_p
26315 && (!crtl->calls_eh_return
26316 || info->ehrd_offset == -432)
26317 && info->vrsave_save_offset == -224
26318 && info->altivec_save_offset == -416);
26320 treg = gen_rtx_REG (SImode, 11);
26321 emit_move_insn (treg, GEN_INT (-info->total_size));
26323 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26324 in R11. It also clobbers R12, so beware! */
26326 /* Preserve CR2 for save_world prologues. */
26327 sz = 5;
26328 sz += 32 - info->first_gp_reg_save;
26329 sz += 64 - info->first_fp_reg_save;
26330 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26331 p = rtvec_alloc (sz);
26332 j = 0;
26333 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26334 gen_rtx_REG (SImode,
26335 LR_REGNO));
26336 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26337 gen_rtx_SYMBOL_REF (Pmode,
26338 "*save_world"));
26339 /* We do floats first so that the instruction pattern matches
26340 properly. */
26341 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26342 RTVEC_ELT (p, j++)
26343 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26344 ? DFmode : SFmode,
26345 info->first_fp_reg_save + i),
26346 frame_reg_rtx,
26347 info->fp_save_offset + frame_off + 8 * i);
26348 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26349 RTVEC_ELT (p, j++)
26350 = gen_frame_store (gen_rtx_REG (V4SImode,
26351 info->first_altivec_reg_save + i),
26352 frame_reg_rtx,
26353 info->altivec_save_offset + frame_off + 16 * i);
26354 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26355 RTVEC_ELT (p, j++)
26356 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26357 frame_reg_rtx,
26358 info->gp_save_offset + frame_off + reg_size * i);
26360 /* CR register traditionally saved as CR2. */
26361 RTVEC_ELT (p, j++)
26362 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26363 frame_reg_rtx, info->cr_save_offset + frame_off);
26364 /* Describe the store of R0, which holds the caller's LR. */
26365 if (info->lr_save_p)
26366 RTVEC_ELT (p, j++)
26367 = gen_frame_store (reg0,
26368 frame_reg_rtx, info->lr_save_offset + frame_off);
26369 /* Explain what happens to the stack pointer. */
26371 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26372 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26375 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26376 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26377 treg, GEN_INT (-info->total_size));
26378 sp_off = frame_off = info->total_size;
26381 strategy = info->savres_strategy;
26383 /* For V.4, update stack before we do any saving and set back pointer. */
26384 if (! WORLD_SAVE_P (info)
26385 && info->push_p
26386 && (DEFAULT_ABI == ABI_V4
26387 || crtl->calls_eh_return))
26389 bool need_r11 = (TARGET_SPE
26390 ? (!(strategy & SAVE_INLINE_GPRS)
26391 && info->spe_64bit_regs_used == 0)
26392 : (!(strategy & SAVE_INLINE_FPRS)
26393 || !(strategy & SAVE_INLINE_GPRS)
26394 || !(strategy & SAVE_INLINE_VRS)));
26395 int ptr_regno = -1;
26396 rtx ptr_reg = NULL_RTX;
26397 int ptr_off = 0;
26399 if (info->total_size < 32767)
26400 frame_off = info->total_size;
26401 else if (need_r11)
26402 ptr_regno = 11;
26403 else if (info->cr_save_p
26404 || info->lr_save_p
26405 || info->first_fp_reg_save < 64
26406 || info->first_gp_reg_save < 32
26407 || info->altivec_size != 0
26408 || info->vrsave_size != 0
26409 || crtl->calls_eh_return)
26410 ptr_regno = 12;
26411 else
26413 /* The prologue won't be saving any regs so there is no need
26414 to set up a frame register to access any frame save area.
26415 We also won't be using frame_off anywhere below, but set
26416 the correct value anyway to protect against future
26417 changes to this function. */
26418 frame_off = info->total_size;
26420 if (ptr_regno != -1)
26422 /* Set up the frame offset to that needed by the first
26423 out-of-line save function. */
26424 START_USE (ptr_regno);
26425 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26426 frame_reg_rtx = ptr_reg;
26427 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26428 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26429 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26430 ptr_off = info->gp_save_offset + info->gp_size;
26431 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26432 ptr_off = info->altivec_save_offset + info->altivec_size;
26433 frame_off = -ptr_off;
26435 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26436 ptr_reg, ptr_off);
26437 if (REGNO (frame_reg_rtx) == 12)
26438 sp_adjust = 0;
26439 sp_off = info->total_size;
26440 if (frame_reg_rtx != sp_reg_rtx)
26441 rs6000_emit_stack_tie (frame_reg_rtx, false);
26444 /* If we use the link register, get it into r0. */
26445 if (!WORLD_SAVE_P (info) && info->lr_save_p)
26447 rtx addr, reg, mem;
26449 reg = gen_rtx_REG (Pmode, 0);
26450 START_USE (0);
26451 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26452 RTX_FRAME_RELATED_P (insn) = 1;
26454 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26455 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26457 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26458 GEN_INT (info->lr_save_offset + frame_off));
26459 mem = gen_rtx_MEM (Pmode, addr);
26460 /* This should not be of rs6000_sr_alias_set, because of
26461 __builtin_return_address. */
26463 insn = emit_move_insn (mem, reg);
26464 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26465 NULL_RTX, NULL_RTX);
26466 END_USE (0);
26470 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26471 r12 will be needed by the out-of-line gpr save. */
26472 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26473 && !(strategy & (SAVE_INLINE_GPRS
26474 | SAVE_NOINLINE_GPRS_SAVES_LR))
26475 ? 11 : 12);
26476 if (!WORLD_SAVE_P (info)
26477 && info->cr_save_p
26478 && REGNO (frame_reg_rtx) != cr_save_regno
26479 && !(using_static_chain_p && cr_save_regno == 11)
26480 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26482 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26483 START_USE (cr_save_regno);
26484 rs6000_emit_move_from_cr (cr_save_rtx);
26487 /* Do any required saving of fpr's. If only one or two to save, do
26488 it ourselves. Otherwise, call function. */
26489 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26491 int i;
26492 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26493 if (save_reg_p (info->first_fp_reg_save + i))
26494 emit_frame_save (frame_reg_rtx,
26495 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26496 ? DFmode : SFmode),
26497 info->first_fp_reg_save + i,
26498 info->fp_save_offset + frame_off + 8 * i,
26499 sp_off - frame_off);
26501 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26503 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26504 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26505 unsigned ptr_regno = ptr_regno_for_savres (sel);
26506 rtx ptr_reg = frame_reg_rtx;
26508 if (REGNO (frame_reg_rtx) == ptr_regno)
26509 gcc_checking_assert (frame_off == 0);
26510 else
26512 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26513 NOT_INUSE (ptr_regno);
26514 emit_insn (gen_add3_insn (ptr_reg,
26515 frame_reg_rtx, GEN_INT (frame_off)));
26517 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26518 info->fp_save_offset,
26519 info->lr_save_offset,
26520 DFmode, sel);
26521 rs6000_frame_related (insn, ptr_reg, sp_off,
26522 NULL_RTX, NULL_RTX);
26523 if (lr)
26524 END_USE (0);
26527 /* Save GPRs. This is done as a PARALLEL if we are using
26528 the store-multiple instructions. */
26529 if (!WORLD_SAVE_P (info)
26530 && TARGET_SPE_ABI
26531 && info->spe_64bit_regs_used != 0
26532 && info->first_gp_reg_save != 32)
26534 int i;
26535 rtx spe_save_area_ptr;
26536 HOST_WIDE_INT save_off;
26537 int ool_adjust = 0;
26539 /* Determine whether we can address all of the registers that need
26540 to be saved with an offset from frame_reg_rtx that fits in
26541 the small const field for SPE memory instructions. */
26542 int spe_regs_addressable
26543 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
26544 + reg_size * (32 - info->first_gp_reg_save - 1))
26545 && (strategy & SAVE_INLINE_GPRS));
26547 if (spe_regs_addressable)
26549 spe_save_area_ptr = frame_reg_rtx;
26550 save_off = frame_off;
26552 else
26554 /* Make r11 point to the start of the SPE save area. We need
26555 to be careful here if r11 is holding the static chain. If
26556 it is, then temporarily save it in r0. */
26557 HOST_WIDE_INT offset;
26559 if (!(strategy & SAVE_INLINE_GPRS))
26560 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
26561 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
26562 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
26563 save_off = frame_off - offset;
26565 if (using_static_chain_p)
26567 rtx r0 = gen_rtx_REG (Pmode, 0);
26569 START_USE (0);
26570 gcc_assert (info->first_gp_reg_save > 11);
26572 emit_move_insn (r0, spe_save_area_ptr);
26574 else if (REGNO (frame_reg_rtx) != 11)
26575 START_USE (11);
26577 emit_insn (gen_addsi3 (spe_save_area_ptr,
26578 frame_reg_rtx, GEN_INT (offset)));
26579 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
26580 frame_off = -info->spe_gp_save_offset + ool_adjust;
26583 if ((strategy & SAVE_INLINE_GPRS))
26585 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26586 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
26587 emit_frame_save (spe_save_area_ptr, reg_mode,
26588 info->first_gp_reg_save + i,
26589 (info->spe_gp_save_offset + save_off
26590 + reg_size * i),
26591 sp_off - save_off);
26593 else
26595 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
26596 info->spe_gp_save_offset + save_off,
26597 0, reg_mode,
26598 SAVRES_SAVE | SAVRES_GPR);
26600 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
26601 NULL_RTX, NULL_RTX);
26604 /* Move the static chain pointer back. */
26605 if (!spe_regs_addressable)
26607 if (using_static_chain_p)
26609 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
26610 END_USE (0);
26612 else if (REGNO (frame_reg_rtx) != 11)
26613 END_USE (11);
26616 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26618 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26619 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26620 unsigned ptr_regno = ptr_regno_for_savres (sel);
26621 rtx ptr_reg = frame_reg_rtx;
26622 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26623 int end_save = info->gp_save_offset + info->gp_size;
26624 int ptr_off;
26626 if (ptr_regno == 12)
26627 sp_adjust = 0;
26628 if (!ptr_set_up)
26629 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26631 /* Need to adjust r11 (r12) if we saved any FPRs. */
26632 if (end_save + frame_off != 0)
26634 rtx offset = GEN_INT (end_save + frame_off);
26636 if (ptr_set_up)
26637 frame_off = -end_save;
26638 else
26639 NOT_INUSE (ptr_regno);
26640 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26642 else if (!ptr_set_up)
26644 NOT_INUSE (ptr_regno);
26645 emit_move_insn (ptr_reg, frame_reg_rtx);
26647 ptr_off = -end_save;
26648 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26649 info->gp_save_offset + ptr_off,
26650 info->lr_save_offset + ptr_off,
26651 reg_mode, sel);
26652 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26653 NULL_RTX, NULL_RTX);
26654 if (lr)
26655 END_USE (0);
26657 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26659 rtvec p;
26660 int i;
26661 p = rtvec_alloc (32 - info->first_gp_reg_save);
26662 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26663 RTVEC_ELT (p, i)
26664 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26665 frame_reg_rtx,
26666 info->gp_save_offset + frame_off + reg_size * i);
26667 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26668 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26669 NULL_RTX, NULL_RTX);
26671 else if (!WORLD_SAVE_P (info))
26673 int i;
26674 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26675 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
26676 emit_frame_save (frame_reg_rtx, reg_mode,
26677 info->first_gp_reg_save + i,
26678 info->gp_save_offset + frame_off + reg_size * i,
26679 sp_off - frame_off);
26682 if (crtl->calls_eh_return)
26684 unsigned int i;
26685 rtvec p;
26687 for (i = 0; ; ++i)
26689 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26690 if (regno == INVALID_REGNUM)
26691 break;
26694 p = rtvec_alloc (i);
26696 for (i = 0; ; ++i)
26698 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26699 if (regno == INVALID_REGNUM)
26700 break;
26702 insn
26703 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26704 sp_reg_rtx,
26705 info->ehrd_offset + sp_off + reg_size * (int) i);
26706 RTVEC_ELT (p, i) = insn;
26707 RTX_FRAME_RELATED_P (insn) = 1;
26710 insn = emit_insn (gen_blockage ());
26711 RTX_FRAME_RELATED_P (insn) = 1;
26712 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26715 /* In the AIX ABI we need to make sure r2 is really saved. */
26716 if (TARGET_AIX && crtl->calls_eh_return)
26718 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26719 rtx save_insn, join_insn, note;
26720 long toc_restore_insn;
26722 tmp_reg = gen_rtx_REG (Pmode, 11);
26723 tmp_reg_si = gen_rtx_REG (SImode, 11);
26724 if (using_static_chain_p)
26726 START_USE (0);
26727 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26729 else
26730 START_USE (11);
26731 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26732 /* Peek at instruction to which this function returns. If it's
26733 restoring r2, then we know we've already saved r2. We can't
26734 unconditionally save r2 because the value we have will already
26735 be updated if we arrived at this function via a plt call or
26736 toc adjusting stub. */
26737 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26738 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26739 + RS6000_TOC_SAVE_SLOT);
26740 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26741 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26742 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26743 validate_condition_mode (EQ, CCUNSmode);
26744 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26745 emit_insn (gen_rtx_SET (compare_result,
26746 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26747 toc_save_done = gen_label_rtx ();
26748 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26749 gen_rtx_EQ (VOIDmode, compare_result,
26750 const0_rtx),
26751 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26752 pc_rtx);
26753 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26754 JUMP_LABEL (jump) = toc_save_done;
26755 LABEL_NUSES (toc_save_done) += 1;
26757 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26758 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26759 sp_off - frame_off);
26761 emit_label (toc_save_done);
26763 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
26764 have a CFG that has different saves along different paths.
26765 Move the note to a dummy blockage insn, which describes that
26766 R2 is unconditionally saved after the label. */
26767 /* ??? An alternate representation might be a special insn pattern
26768 containing both the branch and the store. That might let the
26769 code that minimizes the number of DW_CFA_advance opcodes better
26770 freedom in placing the annotations. */
26771 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26772 if (note)
26773 remove_note (save_insn, note);
26774 else
26775 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26776 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26777 RTX_FRAME_RELATED_P (save_insn) = 0;
26779 join_insn = emit_insn (gen_blockage ());
26780 REG_NOTES (join_insn) = note;
26781 RTX_FRAME_RELATED_P (join_insn) = 1;
26783 if (using_static_chain_p)
26785 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26786 END_USE (0);
26788 else
26789 END_USE (11);
26792 /* Save CR if we use any that must be preserved. */
26793 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26795 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26796 GEN_INT (info->cr_save_offset + frame_off));
26797 rtx mem = gen_frame_mem (SImode, addr);
26799 /* If we didn't copy cr before, do so now using r0. */
26800 if (cr_save_rtx == NULL_RTX)
26802 START_USE (0);
26803 cr_save_rtx = gen_rtx_REG (SImode, 0);
26804 rs6000_emit_move_from_cr (cr_save_rtx);
26807 /* Saving CR requires a two-instruction sequence: one instruction
26808 to move the CR to a general-purpose register, and a second
26809 instruction that stores the GPR to memory.
26811 We do not emit any DWARF CFI records for the first of these,
26812 because we cannot properly represent the fact that CR is saved in
26813 a register. One reason is that we cannot express that multiple
26814 CR fields are saved; another reason is that on 64-bit, the size
26815 of the CR register in DWARF (4 bytes) differs from the size of
26816 a general-purpose register.
26818 This means if any intervening instruction were to clobber one of
26819 the call-saved CR fields, we'd have incorrect CFI. To prevent
26820 this from happening, we mark the store to memory as a use of
26821 those CR fields, which prevents any such instruction from being
26822 scheduled in between the two instructions. */
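	 /* Editor's illustration (register and offset assumed): the pair is
	    e.g. "mfcr 12" followed by "stw 12,8(1)", with the store insn
	    carrying a USE of each saved CR field.  */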
26823 rtx crsave_v[9];
26824 int n_crsave = 0;
26825 int i;
26827 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
26828 for (i = 0; i < 8; i++)
26829 if (save_reg_p (CR0_REGNO + i))
26830 crsave_v[n_crsave++]
26831 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26833 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
26834 gen_rtvec_v (n_crsave, crsave_v)));
26835 END_USE (REGNO (cr_save_rtx));
26837 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
26838 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
26839 so we need to construct a frame expression manually. */
26840 RTX_FRAME_RELATED_P (insn) = 1;
26842 /* Update address to be stack-pointer relative, like
26843 rs6000_frame_related would do. */
26844 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26845 GEN_INT (info->cr_save_offset + sp_off));
26846 mem = gen_frame_mem (SImode, addr);
26848 if (DEFAULT_ABI == ABI_ELFv2)
26850 /* In the ELFv2 ABI we generate separate CFI records for each
26851 CR field that was actually saved. They all point to the
26852 same 32-bit stack slot. */
26853 rtx crframe[8];
26854 int n_crframe = 0;
26856 for (i = 0; i < 8; i++)
26857 if (save_reg_p (CR0_REGNO + i))
26859 crframe[n_crframe]
26860 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
26862 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
26863 n_crframe++;
26866 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26867 gen_rtx_PARALLEL (VOIDmode,
26868 gen_rtvec_v (n_crframe, crframe)));
26870 else
26872 /* In other ABIs, by convention, we use a single CR regnum to
26873 represent the fact that all call-saved CR fields are saved.
26874 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
26875 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
26876 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
26880 /* In the ELFv2 ABI we need to save all call-saved CR fields into
26881 *separate* slots if the routine calls __builtin_eh_return, so
26882 that they can be independently restored by the unwinder. */
26883 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
26885 int i, cr_off = info->ehcr_offset;
26886 rtx crsave;
26888 /* ??? We might get better performance by using multiple mfocrf
26889 instructions. */
26890 crsave = gen_rtx_REG (SImode, 0);
26891 emit_insn (gen_movesi_from_cr (crsave));
26893 for (i = 0; i < 8; i++)
26894 if (!call_used_regs[CR0_REGNO + i])
26896 rtvec p = rtvec_alloc (2);
26897 RTVEC_ELT (p, 0)
26898 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
26899 RTVEC_ELT (p, 1)
26900 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26902 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26904 RTX_FRAME_RELATED_P (insn) = 1;
26905 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26906 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
26907 sp_reg_rtx, cr_off + sp_off));
26909 cr_off += reg_size;
26913 /* Update stack and set back pointer unless this is V.4,
26914 for which it was done previously. */
26915 if (!WORLD_SAVE_P (info) && info->push_p
26916 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
26918 rtx ptr_reg = NULL;
26919 int ptr_off = 0;
26921 /* If saving altivec regs we need to be able to address all save
26922 locations using a 16-bit offset. */
26923 if ((strategy & SAVE_INLINE_VRS) == 0
26924 || (info->altivec_size != 0
26925 && (info->altivec_save_offset + info->altivec_size - 16
26926 + info->total_size - frame_off) > 32767)
26927 || (info->vrsave_size != 0
26928 && (info->vrsave_save_offset
26929 + info->total_size - frame_off) > 32767))
26931 int sel = SAVRES_SAVE | SAVRES_VR;
26932 unsigned ptr_regno = ptr_regno_for_savres (sel);
26934 if (using_static_chain_p
26935 && ptr_regno == STATIC_CHAIN_REGNUM)
26936 ptr_regno = 12;
26937 if (REGNO (frame_reg_rtx) != ptr_regno)
26938 START_USE (ptr_regno);
26939 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26940 frame_reg_rtx = ptr_reg;
26941 ptr_off = info->altivec_save_offset + info->altivec_size;
26942 frame_off = -ptr_off;
26944 else if (REGNO (frame_reg_rtx) == 1)
26945 frame_off = info->total_size;
26946 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26947 ptr_reg, ptr_off);
26948 if (REGNO (frame_reg_rtx) == 12)
26949 sp_adjust = 0;
26950 sp_off = info->total_size;
26951 if (frame_reg_rtx != sp_reg_rtx)
26952 rs6000_emit_stack_tie (frame_reg_rtx, false);
26955 /* Set frame pointer, if needed. */
26956 if (frame_pointer_needed)
26958 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
26959 sp_reg_rtx);
26960 RTX_FRAME_RELATED_P (insn) = 1;
26963 /* Save AltiVec registers if needed. Save here because the red zone does
26964 not always include AltiVec registers. */
26965 if (!WORLD_SAVE_P (info)
26966 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
26968 int end_save = info->altivec_save_offset + info->altivec_size;
26969 int ptr_off;
26970 /* Oddly, the vector save/restore functions point r0 at the end
26971 of the save area, then use r11 or r12 to load offsets for
26972 [reg+reg] addressing. */
26973 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
26974 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
26975 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
26977 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
26978 NOT_INUSE (0);
26979 if (scratch_regno == 12)
26980 sp_adjust = 0;
26981 if (end_save + frame_off != 0)
26983 rtx offset = GEN_INT (end_save + frame_off);
26985 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26987 else
26988 emit_move_insn (ptr_reg, frame_reg_rtx);
26990 ptr_off = -end_save;
26991 insn = rs6000_emit_savres_rtx (info, scratch_reg,
26992 info->altivec_save_offset + ptr_off,
26993 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
26994 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
26995 NULL_RTX, NULL_RTX);
26996 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
26998 /* The oddity mentioned above clobbered our frame reg. */
26999 emit_move_insn (frame_reg_rtx, ptr_reg);
27000 frame_off = ptr_off;
27003 else if (!WORLD_SAVE_P (info)
27004 && info->altivec_size != 0)
27006 int i;
27008 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27009 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27011 rtx areg, savereg, mem;
27012 HOST_WIDE_INT offset;
27014 offset = (info->altivec_save_offset + frame_off
27015 + 16 * (i - info->first_altivec_reg_save));
27017 savereg = gen_rtx_REG (V4SImode, i);
27019 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27021 mem = gen_frame_mem (V4SImode,
27022 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27023 GEN_INT (offset)));
27024 insn = emit_insn (gen_rtx_SET (mem, savereg));
27025 areg = NULL_RTX;
27027 else
27029 NOT_INUSE (0);
27030 areg = gen_rtx_REG (Pmode, 0);
27031 emit_move_insn (areg, GEN_INT (offset));
27033 /* AltiVec addressing mode is [reg+reg]. */
27034 mem = gen_frame_mem (V4SImode,
27035 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27037 /* Rather than emitting a generic move, force use of the stvx
27038 instruction, which we always want on ISA 2.07 (power8) systems.
27039 In particular we don't want xxpermdi/stxvd2x for little
27040 endian. */
27041 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27044 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27045 areg, GEN_INT (offset));
27049 /* VRSAVE is a bit vector representing which AltiVec registers
27050 are used. The OS uses this to determine which vector
27051 registers to save on a context switch. We need to save
27052 VRSAVE on the stack frame, add whatever AltiVec registers we
27053 used in this function, and do the corresponding magic in the
27054 epilogue. */
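/* Editor's note: concretely, the old VRSAVE value is stored to the
   frame slot below, then info->vrsave_mask is OR'd in and the result
   is written back to VRSAVE.  */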
27056 if (!WORLD_SAVE_P (info)
27057 && info->vrsave_size != 0)
27059 rtx reg, vrsave;
27060 int offset;
27061 int save_regno;
27063 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27064 be using r12 as frame_reg_rtx and r11 as the static chain
27065 pointer for nested functions. */
27066 save_regno = 12;
27067 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27068 && !using_static_chain_p)
27069 save_regno = 11;
27070 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27072 save_regno = 11;
27073 if (using_static_chain_p)
27074 save_regno = 0;
27077 NOT_INUSE (save_regno);
27078 reg = gen_rtx_REG (SImode, save_regno);
27079 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27080 if (TARGET_MACHO)
27081 emit_insn (gen_get_vrsave_internal (reg));
27082 else
27083 emit_insn (gen_rtx_SET (reg, vrsave));
27085 /* Save VRSAVE. */
27086 offset = info->vrsave_save_offset + frame_off;
27087 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
27089 /* Include the registers in the mask. */
27090 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
27092 insn = emit_insn (generate_set_vrsave (reg, info, 0));
27095 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27096 if (!TARGET_SINGLE_PIC_BASE
27097 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
27098 || (DEFAULT_ABI == ABI_V4
27099 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27100 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27102 /* If emit_load_toc_table will use the link register, we need to save
27103 it. We use R12 for this purpose because emit_load_toc_table
27104 can use register 0. This allows us to use a plain 'blr' to return
27105 from the procedure more often. */
27106 int save_LR_around_toc_setup = (TARGET_ELF
27107 && DEFAULT_ABI == ABI_V4
27108 && flag_pic
27109 && ! info->lr_save_p
27110 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27111 if (save_LR_around_toc_setup)
27113 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27114 rtx tmp = gen_rtx_REG (Pmode, 12);
27116 sp_adjust = 0;
27117 insn = emit_move_insn (tmp, lr);
27118 RTX_FRAME_RELATED_P (insn) = 1;
27120 rs6000_emit_load_toc_table (TRUE);
27122 insn = emit_move_insn (lr, tmp);
27123 add_reg_note (insn, REG_CFA_RESTORE, lr);
27124 RTX_FRAME_RELATED_P (insn) = 1;
27126 else
27127 rs6000_emit_load_toc_table (TRUE);
27130 #if TARGET_MACHO
27131 if (!TARGET_SINGLE_PIC_BASE
27132 && DEFAULT_ABI == ABI_DARWIN
27133 && flag_pic && crtl->uses_pic_offset_table)
27135 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27136 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27138 /* Save and restore LR locally around this call (in R0). */
27139 if (!info->lr_save_p)
27140 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27142 emit_insn (gen_load_macho_picbase (src));
27144 emit_move_insn (gen_rtx_REG (Pmode,
27145 RS6000_PIC_OFFSET_TABLE_REGNUM),
27146 lr);
27148 if (!info->lr_save_p)
27149 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27151 #endif
27153 /* If we need to, save the TOC register after doing the stack setup.
27154 Do not emit eh frame info for this save. The unwinder wants info,
27155 conceptually attached to instructions in this function, about
27156 register values in the caller of this function. This R2 may have
27157 already been changed from the value in the caller.
27158 We don't attempt to write accurate DWARF EH frame info for R2
27159 because code emitted by gcc for a (non-pointer) function call
27160 doesn't save and restore R2. Instead, R2 is managed out-of-line
27161 by a linker generated plt call stub when the function resides in
27162 a shared library. This behavior is costly to describe in DWARF,
27163 both in terms of the size of DWARF info and the time taken in the
27164 unwinder to interpret it. R2 changes, apart from the
27165 calls_eh_return case earlier in this function, are handled by
27166 linux-unwind.h frob_update_context. */
27167 if (rs6000_save_toc_in_prologue_p ())
27169 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27170 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27173 if (using_split_stack && split_stack_arg_pointer_used_p ())
27175 /* Set up the arg pointer (r12) for -fsplit-stack code. If
27176 __morestack was called, it left the arg pointer to the old
27177 stack in r29. Otherwise, the arg pointer is the top of the
27178 current frame. */
27179 cfun->machine->split_stack_argp_used = true;
27180 if (sp_adjust)
27182 rtx r12 = gen_rtx_REG (Pmode, 12);
27183 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
27184 emit_insn_before (set_r12, sp_adjust);
27186 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
27188 rtx r12 = gen_rtx_REG (Pmode, 12);
27189 if (frame_off == 0)
27190 emit_move_insn (r12, frame_reg_rtx);
27191 else
27192 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27194 if (info->push_p)
27196 rtx r12 = gen_rtx_REG (Pmode, 12);
27197 rtx r29 = gen_rtx_REG (Pmode, 29);
27198 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27199 rtx not_more = gen_label_rtx ();
27200 rtx jump;
27202 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27203 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27204 gen_rtx_LABEL_REF (VOIDmode, not_more),
27205 pc_rtx);
27206 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27207 JUMP_LABEL (jump) = not_more;
27208 LABEL_NUSES (not_more) += 1;
27209 emit_move_insn (r12, r29);
27210 emit_label (not_more);
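/* Illustration (a sketch, not emitted verbatim): assuming the cr7
   comparison left behind by rs6000_expand_split_stack_prologue is
   still live here, the push_p case above amounts to

	bge 7,0f	-- enough stack; __morestack was not called
	mr 12,29	-- __morestack ran; old-stack args live at r29
   0:

   so r12 points at the incoming arguments on either path.  */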
27215 /* Output .extern statements for the save/restore routines we use. */
27217 static void
27218 rs6000_output_savres_externs (FILE *file)
27220 rs6000_stack_t *info = rs6000_stack_info ();
27222 if (TARGET_DEBUG_STACK)
27223 debug_stack_info (info);
27225 /* Write .extern for any function we will call to save and restore
27226 fp values. */
27227 if (info->first_fp_reg_save < 64
27228 && !TARGET_MACHO
27229 && !TARGET_ELF)
27231 char *name;
27232 int regno = info->first_fp_reg_save - 32;
27234 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27236 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27237 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27238 name = rs6000_savres_routine_name (info, regno, sel);
27239 fprintf (file, "\t.extern %s\n", name);
27241 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27243 bool lr = (info->savres_strategy
27244 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27245 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27246 name = rs6000_savres_routine_name (info, regno, sel);
27247 fprintf (file, "\t.extern %s\n", name);
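/* Illustration (names deliberately left abstract): if first_fp_reg_save
   were 46, regno above is 14, and when the out-of-line strategies are
   chosen this function prints one "\t.extern <name>" line for the save
   helper and one for the restore helper; the exact <name> strings are
   whatever rs6000_savres_routine_name constructs for that regno and
   SAVRES_* selector.  */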
27252 /* Write function prologue. */
27254 static void
27255 rs6000_output_function_prologue (FILE *file,
27256 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
27258 if (!cfun->is_thunk)
27259 rs6000_output_savres_externs (file);
27261 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27262 immediately after the global entry point label. */
27263 if (rs6000_global_entry_point_needed_p ())
27265 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27267 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27269 if (TARGET_CMODEL != CMODEL_LARGE)
27271 /* In the small and medium code models, we assume the TOC is less
27272 than 2 GB away from the text section, so it can be computed via the
27273 following two-instruction sequence. */
27274 char buf[256];
27276 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27277 fprintf (file, "0:\taddis 2,12,.TOC.-");
27278 assemble_name (file, buf);
27279 fprintf (file, "@ha\n");
27280 fprintf (file, "\taddi 2,2,.TOC.-");
27281 assemble_name (file, buf);
27282 fprintf (file, "@l\n");
27284 else
27286 /* In the large code model, we allow arbitrary offsets between the
27287 TOC and the text section, so we have to load the offset from
27288 memory. The data field is emitted directly before the global
27289 entry point in rs6000_elf_declare_function_name. */
27290 char buf[256];
27292 #ifdef HAVE_AS_ENTRY_MARKERS
27293 /* If supported by the linker, emit a marker relocation. If the
27294 total code size of the final executable or shared library
27295 happens to fit into 2 GB after all, the linker will replace
27296 this code sequence with the sequence for the small or medium
27297 code model. */
27298 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27299 #endif
27300 fprintf (file, "\tld 2,");
27301 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27302 assemble_name (file, buf);
27303 fprintf (file, "-");
27304 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27305 assemble_name (file, buf);
27306 fprintf (file, "(12)\n");
27307 fprintf (file, "\tadd 2,2,12\n");
27310 fputs ("\t.localentry\t", file);
27311 assemble_name (file, name);
27312 fputs (",.-", file);
27313 assemble_name (file, name);
27314 fputs ("\n", file);
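/* Illustration (a sketch assuming rs6000_pic_labelno == 0 and a
   function `foo'): the medium-model path above prints roughly

	.LCF0:
	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
		.localentry foo,.-foo

   while the large-model path loads the TOC offset from the .LCL0 word
   emitted by rs6000_elf_declare_function_name:

		.reloc .,R_PPC64_ENTRY
		ld 2,.LCL0-.LCF0(12)
		add 2,2,12  */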
27317 /* Output -mprofile-kernel code. This needs to be done here instead of
27318 in output_function_profile since it must go after the ELFv2 ABI
27319 local entry point. */
27320 if (TARGET_PROFILE_KERNEL && crtl->profile)
27322 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27323 gcc_assert (!TARGET_32BIT);
27325 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27327 /* In the ELFv2 ABI we have no compiler stack word. It must be
27328 the responsibility of _mcount to preserve the static chain
27329 register if required. */
27330 if (DEFAULT_ABI != ABI_ELFv2
27331 && cfun->static_chain_decl != NULL)
27333 asm_fprintf (file, "\tstd %s,24(%s)\n",
27334 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27335 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27336 asm_fprintf (file, "\tld %s,24(%s)\n",
27337 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27339 else
27340 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27343 rs6000_pic_labelno++;
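/* Illustration (a sketch; the exact call target is RS6000_MCOUNT,
   e.g. _mcount on Linux): the -mprofile-kernel block above emits

	mflr 0
	std 11,24(1)	-- only if a static chain is live, non-ELFv2
	bl _mcount
	ld 11,24(1)

   where 11 stands for STATIC_CHAIN_REGNUM.  */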
27346 /* -mprofile-kernel code calls mcount before the function prologue,
27347 so a profiled leaf function should stay a leaf function. */
27348 static bool
27349 rs6000_keep_leaf_when_profiled ()
27351 return TARGET_PROFILE_KERNEL;
27354 /* Non-zero if vmx regs are restored before the frame pop, zero if
27355 we restore after the pop when possible. */
27356 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27358 /* Restoring cr is a two step process: loading a reg from the frame
27359 save, then moving the reg to cr. For ABI_V4 we must let the
27360 unwinder know that the stack location is no longer valid at or
27361 before the stack deallocation, but we can't emit a cfa_restore for
27362 cr at the stack deallocation like we do for other registers.
27363 The trouble is that it is possible for the move to cr to be
27364 scheduled after the stack deallocation. So say exactly where cr
27365 is located on each of the two insns. */
27367 static rtx
27368 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27370 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27371 rtx reg = gen_rtx_REG (SImode, regno);
27372 rtx_insn *insn = emit_move_insn (reg, mem);
27374 if (!exit_func && DEFAULT_ABI == ABI_V4)
27376 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27377 rtx set = gen_rtx_SET (reg, cr);
27379 add_reg_note (insn, REG_CFA_REGISTER, set);
27380 RTX_FRAME_RELATED_P (insn) = 1;
27382 return reg;
27385 /* Reload CR from REG. */
27387 static void
27388 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27390 int count = 0;
27391 int i;
27393 if (using_mfcr_multiple)
27395 for (i = 0; i < 8; i++)
27396 if (save_reg_p (CR0_REGNO + i))
27397 count++;
27398 gcc_assert (count);
27401 if (using_mfcr_multiple && count > 1)
27403 rtx_insn *insn;
27404 rtvec p;
27405 int ndx;
27407 p = rtvec_alloc (count);
27409 ndx = 0;
27410 for (i = 0; i < 8; i++)
27411 if (save_reg_p (CR0_REGNO + i))
27413 rtvec r = rtvec_alloc (2);
27414 RTVEC_ELT (r, 0) = reg;
27415 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27416 RTVEC_ELT (p, ndx) =
27417 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27418 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27419 ndx++;
27421 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27422 gcc_assert (ndx == count);
27424 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27425 CR field separately. */
27426 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27428 for (i = 0; i < 8; i++)
27429 if (save_reg_p (CR0_REGNO + i))
27430 add_reg_note (insn, REG_CFA_RESTORE,
27431 gen_rtx_REG (SImode, CR0_REGNO + i));
27433 RTX_FRAME_RELATED_P (insn) = 1;
27436 else
27437 for (i = 0; i < 8; i++)
27438 if (save_reg_p (CR0_REGNO + i))
27440 rtx insn = emit_insn (gen_movsi_to_cr_one
27441 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27443 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27444 CR field separately, attached to the insn that in fact
27445 restores this particular CR field. */
27446 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27448 add_reg_note (insn, REG_CFA_RESTORE,
27449 gen_rtx_REG (SImode, CR0_REGNO + i));
27451 RTX_FRAME_RELATED_P (insn) = 1;
27455 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27456 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27457 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27459 rtx_insn *insn = get_last_insn ();
27460 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27462 add_reg_note (insn, REG_CFA_RESTORE, cr);
27463 RTX_FRAME_RELATED_P (insn) = 1;
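/* Illustrative note (a sketch; exact assembly depends on the md
   patterns): the mask bit for CR field i is 1 << (7 - i), so if,
   say, CR2 and CR3 were both saved, the using_mfcr_multiple PARALLEL
   above should assemble to something like a single
	mtcrf 0x30,reg		-- 0x20 | 0x10
   whereas the non-multiple path restores each field with its own
   move-to-CR insn.  */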
27467 /* Like cr, the move to lr instruction can be scheduled after the
27468 stack deallocation, but unlike cr, its stack frame save is still
27469 valid. So we only need to emit the cfa_restore on the correct
27470 instruction. */
27472 static void
27473 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27475 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27476 rtx reg = gen_rtx_REG (Pmode, regno);
27478 emit_move_insn (reg, mem);
27481 static void
27482 restore_saved_lr (int regno, bool exit_func)
27484 rtx reg = gen_rtx_REG (Pmode, regno);
27485 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27486 rtx_insn *insn = emit_move_insn (lr, reg);
27488 if (!exit_func && flag_shrink_wrap)
27490 add_reg_note (insn, REG_CFA_RESTORE, lr);
27491 RTX_FRAME_RELATED_P (insn) = 1;
27495 static rtx
27496 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27498 if (DEFAULT_ABI == ABI_ELFv2)
27500 int i;
27501 for (i = 0; i < 8; i++)
27502 if (save_reg_p (CR0_REGNO + i))
27504 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27505 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27506 cfa_restores);
27509 else if (info->cr_save_p)
27510 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27511 gen_rtx_REG (SImode, CR2_REGNO),
27512 cfa_restores);
27514 if (info->lr_save_p)
27515 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27516 gen_rtx_REG (Pmode, LR_REGNO),
27517 cfa_restores);
27518 return cfa_restores;
27521 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27522 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27523 below the stack pointer that are not clobbered by signals. */
27525 static inline bool
27526 offset_below_red_zone_p (HOST_WIDE_INT offset)
27528 return offset < (DEFAULT_ABI == ABI_V4
27529 ? 0
27530 : TARGET_32BIT ? -220 : -288);
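/* Illustration: with these bounds, offset_below_red_zone_p (-256) is
   true under ABI_V4 (no cushion at all) but false for the 64-bit
   AIX-style ABIs, whose 288-byte red zone still covers that slot.  */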
27533 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27535 static void
27536 emit_cfa_restores (rtx cfa_restores)
27538 rtx_insn *insn = get_last_insn ();
27539 rtx *loc = &REG_NOTES (insn);
27541 while (*loc)
27542 loc = &XEXP (*loc, 1);
27543 *loc = cfa_restores;
27544 RTX_FRAME_RELATED_P (insn) = 1;
27547 /* Emit function epilogue as insns. */
27549 void
27550 rs6000_emit_epilogue (int sibcall)
27552 rs6000_stack_t *info;
27553 int restoring_GPRs_inline;
27554 int restoring_FPRs_inline;
27555 int using_load_multiple;
27556 int using_mtcr_multiple;
27557 int use_backchain_to_restore_sp;
27558 int restore_lr;
27559 int strategy;
27560 HOST_WIDE_INT frame_off = 0;
27561 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27562 rtx frame_reg_rtx = sp_reg_rtx;
27563 rtx cfa_restores = NULL_RTX;
27564 rtx insn;
27565 rtx cr_save_reg = NULL_RTX;
27566 machine_mode reg_mode = Pmode;
27567 int reg_size = TARGET_32BIT ? 4 : 8;
27568 int i;
27569 bool exit_func;
27570 unsigned ptr_regno;
27572 info = rs6000_stack_info ();
27574 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
27576 reg_mode = V2SImode;
27577 reg_size = 8;
27580 strategy = info->savres_strategy;
27581 using_load_multiple = strategy & REST_MULTIPLE;
27582 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27583 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27584 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27585 || rs6000_cpu == PROCESSOR_PPC603
27586 || rs6000_cpu == PROCESSOR_PPC750
27587 || optimize_size);
27588 /* Restore via the backchain when we have a large frame, since this
27589 is more efficient than an addis, addi pair. The second condition
27590 here will not trigger at the moment; we don't actually need a
27591 frame pointer for alloca, but the generic parts of the compiler
27592 give us one anyway. */
27593 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27594 ? info->lr_save_offset
27595 : 0) > 32767
27596 || (cfun->calls_alloca
27597 && !frame_pointer_needed));
27598 restore_lr = (info->lr_save_p
27599 && (restoring_FPRs_inline
27600 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27601 && (restoring_GPRs_inline
27602 || info->first_fp_reg_save < 64));
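/* Illustrative note (sketch): "restore via the backchain" means one
   load of the caller's SP through the back-chain word at offset 0
   from r1 -- e.g. `ld 11,0(1)' on 64-bit -- instead of rebuilding it
   with an addis/addi pair when total_size does not fit in a 16-bit
   signed immediate.  */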
27604 if (WORLD_SAVE_P (info))
27606 int i, j;
27607 char rname[30];
27608 const char *alloc_rname;
27609 rtvec p;
27611 /* eh_rest_world_r10 will return to the location saved in the LR
27612 stack slot (which is not likely to be our caller.)
27613 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27614 rest_world is similar, except any R10 parameter is ignored.
27615 The exception-handling stuff that was here in 2.95 is no
27616 longer necessary. */
27618 p = rtvec_alloc (9
27620 + 32 - info->first_gp_reg_save
27621 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27622 + 63 + 1 - info->first_fp_reg_save);
27624 strcpy (rname, ((crtl->calls_eh_return) ?
27625 "*eh_rest_world_r10" : "*rest_world"));
27626 alloc_rname = ggc_strdup (rname);
27628 j = 0;
27629 RTVEC_ELT (p, j++) = ret_rtx;
27630 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27631 gen_rtx_REG (Pmode,
27632 LR_REGNO));
27633 RTVEC_ELT (p, j++)
27634 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27635 /* The instruction pattern requires a clobber here;
27636 it is shared with the restVEC helper. */
27637 RTVEC_ELT (p, j++)
27638 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27641 /* CR register traditionally saved as CR2. */
27642 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27643 RTVEC_ELT (p, j++)
27644 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27645 if (flag_shrink_wrap)
27647 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27648 gen_rtx_REG (Pmode, LR_REGNO),
27649 cfa_restores);
27650 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27654 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27656 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27657 RTVEC_ELT (p, j++)
27658 = gen_frame_load (reg,
27659 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27660 if (flag_shrink_wrap)
27661 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27663 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27665 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27666 RTVEC_ELT (p, j++)
27667 = gen_frame_load (reg,
27668 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27669 if (flag_shrink_wrap)
27670 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27672 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27674 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27675 ? DFmode : SFmode),
27676 info->first_fp_reg_save + i);
27677 RTVEC_ELT (p, j++)
27678 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27679 if (flag_shrink_wrap)
27680 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27682 RTVEC_ELT (p, j++)
27683 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27684 RTVEC_ELT (p, j++)
27685 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27686 RTVEC_ELT (p, j++)
27687 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27688 RTVEC_ELT (p, j++)
27689 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27690 RTVEC_ELT (p, j++)
27691 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27692 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27694 if (flag_shrink_wrap)
27696 REG_NOTES (insn) = cfa_restores;
27697 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27698 RTX_FRAME_RELATED_P (insn) = 1;
27700 return;
27703 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27704 if (info->push_p)
27705 frame_off = info->total_size;
27707 /* Restore AltiVec registers if we must do so before adjusting the
27708 stack. */
27709 if (info->altivec_size != 0
27710 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27711 || (DEFAULT_ABI != ABI_V4
27712 && offset_below_red_zone_p (info->altivec_save_offset))))
27714 int i;
27715 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27717 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27718 if (use_backchain_to_restore_sp)
27720 int frame_regno = 11;
27722 if ((strategy & REST_INLINE_VRS) == 0)
27724 /* Of r11 and r12, select the one not clobbered by an
27725 out-of-line restore function for the frame register. */
27726 frame_regno = 11 + 12 - scratch_regno;
27728 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27729 emit_move_insn (frame_reg_rtx,
27730 gen_rtx_MEM (Pmode, sp_reg_rtx));
27731 frame_off = 0;
27733 else if (frame_pointer_needed)
27734 frame_reg_rtx = hard_frame_pointer_rtx;
27736 if ((strategy & REST_INLINE_VRS) == 0)
27738 int end_save = info->altivec_save_offset + info->altivec_size;
27739 int ptr_off;
27740 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27741 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27743 if (end_save + frame_off != 0)
27745 rtx offset = GEN_INT (end_save + frame_off);
27747 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27749 else
27750 emit_move_insn (ptr_reg, frame_reg_rtx);
27752 ptr_off = -end_save;
27753 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27754 info->altivec_save_offset + ptr_off,
27755 0, V4SImode, SAVRES_VR);
27757 else
27759 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27760 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27762 rtx addr, areg, mem, insn;
27763 rtx reg = gen_rtx_REG (V4SImode, i);
27764 HOST_WIDE_INT offset
27765 = (info->altivec_save_offset + frame_off
27766 + 16 * (i - info->first_altivec_reg_save));
27768 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27770 mem = gen_frame_mem (V4SImode,
27771 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27772 GEN_INT (offset)));
27773 insn = gen_rtx_SET (reg, mem);
27775 else
27777 areg = gen_rtx_REG (Pmode, 0);
27778 emit_move_insn (areg, GEN_INT (offset));
27780 /* AltiVec addressing mode is [reg+reg]. */
27781 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27782 mem = gen_frame_mem (V4SImode, addr);
27784 /* Rather than emitting a generic move, force use of the
27785 lvx instruction, which we always want. In particular we
27786 don't want lxvd2x/xxpermdi for little endian. */
27787 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27790 (void) emit_insn (insn);
27794 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27795 if (((strategy & REST_INLINE_VRS) == 0
27796 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27797 && (flag_shrink_wrap
27798 || (offset_below_red_zone_p
27799 (info->altivec_save_offset
27800 + 16 * (i - info->first_altivec_reg_save)))))
27802 rtx reg = gen_rtx_REG (V4SImode, i);
27803 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27807 /* Restore VRSAVE if we must do so before adjusting the stack. */
27808 if (info->vrsave_size != 0
27809 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27810 || (DEFAULT_ABI != ABI_V4
27811 && offset_below_red_zone_p (info->vrsave_save_offset))))
27813 rtx reg;
27815 if (frame_reg_rtx == sp_reg_rtx)
27817 if (use_backchain_to_restore_sp)
27819 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27820 emit_move_insn (frame_reg_rtx,
27821 gen_rtx_MEM (Pmode, sp_reg_rtx));
27822 frame_off = 0;
27824 else if (frame_pointer_needed)
27825 frame_reg_rtx = hard_frame_pointer_rtx;
27828 reg = gen_rtx_REG (SImode, 12);
27829 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27830 info->vrsave_save_offset + frame_off));
27832 emit_insn (generate_set_vrsave (reg, info, 1));
27835 insn = NULL_RTX;
27836 /* If we have a large stack frame, restore the old stack pointer
27837 using the backchain. */
27838 if (use_backchain_to_restore_sp)
27840 if (frame_reg_rtx == sp_reg_rtx)
27842 /* Under V.4, don't reset the stack pointer until after we're done
27843 loading the saved registers. */
27844 if (DEFAULT_ABI == ABI_V4)
27845 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27847 insn = emit_move_insn (frame_reg_rtx,
27848 gen_rtx_MEM (Pmode, sp_reg_rtx));
27849 frame_off = 0;
27851 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27852 && DEFAULT_ABI == ABI_V4)
27853 /* frame_reg_rtx has been set up by the altivec restore. */
27855 else
27857 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27858 frame_reg_rtx = sp_reg_rtx;
27861 /* If we have a frame pointer, we can restore the old stack pointer
27862 from it. */
27863 else if (frame_pointer_needed)
27865 frame_reg_rtx = sp_reg_rtx;
27866 if (DEFAULT_ABI == ABI_V4)
27867 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27868 /* Prevent reordering memory accesses against stack pointer restore. */
27869 else if (cfun->calls_alloca
27870 || offset_below_red_zone_p (-info->total_size))
27871 rs6000_emit_stack_tie (frame_reg_rtx, true);
27873 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
27874 GEN_INT (info->total_size)));
27875 frame_off = 0;
27877 else if (info->push_p
27878 && DEFAULT_ABI != ABI_V4
27879 && !crtl->calls_eh_return)
27881 /* Prevent reordering memory accesses against stack pointer restore. */
27882 if (cfun->calls_alloca
27883 || offset_below_red_zone_p (-info->total_size))
27884 rs6000_emit_stack_tie (frame_reg_rtx, false);
27885 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
27886 GEN_INT (info->total_size)));
27887 frame_off = 0;
27889 if (insn && frame_reg_rtx == sp_reg_rtx)
27891 if (cfa_restores)
27893 REG_NOTES (insn) = cfa_restores;
27894 cfa_restores = NULL_RTX;
27896 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27897 RTX_FRAME_RELATED_P (insn) = 1;
27900 /* Restore AltiVec registers if we have not done so already. */
27901 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27902 && info->altivec_size != 0
27903 && (DEFAULT_ABI == ABI_V4
27904 || !offset_below_red_zone_p (info->altivec_save_offset)))
27906 int i;
27908 if ((strategy & REST_INLINE_VRS) == 0)
27910 int end_save = info->altivec_save_offset + info->altivec_size;
27911 int ptr_off;
27912 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27913 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27914 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27916 if (end_save + frame_off != 0)
27918 rtx offset = GEN_INT (end_save + frame_off);
27920 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27922 else
27923 emit_move_insn (ptr_reg, frame_reg_rtx);
27925 ptr_off = -end_save;
27926 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27927 info->altivec_save_offset + ptr_off,
27928 0, V4SImode, SAVRES_VR);
27929 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27931 /* Frame reg was clobbered by out-of-line save. Restore it
27932 from ptr_reg, and if we are calling out-of-line gpr or
27933 fpr restore set up the correct pointer and offset. */
27934 unsigned newptr_regno = 1;
27935 if (!restoring_GPRs_inline)
27937 bool lr = info->gp_save_offset + info->gp_size == 0;
27938 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
27939 newptr_regno = ptr_regno_for_savres (sel);
27940 end_save = info->gp_save_offset + info->gp_size;
27942 else if (!restoring_FPRs_inline)
27944 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
27945 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27946 newptr_regno = ptr_regno_for_savres (sel);
27947 end_save = info->fp_save_offset + info->fp_size;
27950 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
27951 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
27953 if (end_save + ptr_off != 0)
27955 rtx offset = GEN_INT (end_save + ptr_off);
27957 frame_off = -end_save;
27958 if (TARGET_32BIT)
27959 emit_insn (gen_addsi3_carry (frame_reg_rtx,
27960 ptr_reg, offset));
27961 else
27962 emit_insn (gen_adddi3_carry (frame_reg_rtx,
27963 ptr_reg, offset));
27965 else
27967 frame_off = ptr_off;
27968 emit_move_insn (frame_reg_rtx, ptr_reg);
27972 else
27974 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27975 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27977 rtx addr, areg, mem, insn;
27978 rtx reg = gen_rtx_REG (V4SImode, i);
27979 HOST_WIDE_INT offset
27980 = (info->altivec_save_offset + frame_off
27981 + 16 * (i - info->first_altivec_reg_save));
27983 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
27985 mem = gen_frame_mem (V4SImode,
27986 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27987 GEN_INT (offset)));
27988 insn = gen_rtx_SET (reg, mem);
27990 else
27992 areg = gen_rtx_REG (Pmode, 0);
27993 emit_move_insn (areg, GEN_INT (offset));
27995 /* AltiVec addressing mode is [reg+reg]. */
27996 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27997 mem = gen_frame_mem (V4SImode, addr);
27999 /* Rather than emitting a generic move, force use of the
28000 lvx instruction, which we always want. In particular we
28001 don't want lxvd2x/xxpermdi for little endian. */
28002 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28005 (void) emit_insn (insn);
28009 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28010 if (((strategy & REST_INLINE_VRS) == 0
28011 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28012 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28014 rtx reg = gen_rtx_REG (V4SImode, i);
28015 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28019 /* Restore VRSAVE if we have not done so already. */
28020 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28021 && info->vrsave_size != 0
28022 && (DEFAULT_ABI == ABI_V4
28023 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28025 rtx reg;
28027 reg = gen_rtx_REG (SImode, 12);
28028 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28029 info->vrsave_save_offset + frame_off));
28031 emit_insn (generate_set_vrsave (reg, info, 1));
28034 /* If we exit by an out-of-line restore function on ABI_V4 then that
28035 function will deallocate the stack, so we don't need to worry
28036 about the unwinder restoring cr from an invalid stack frame
28037 location. */
28038 exit_func = (!restoring_FPRs_inline
28039 || (!restoring_GPRs_inline
28040 && info->first_fp_reg_save == 64));
28042 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28043 *separate* slots if the routine calls __builtin_eh_return, so
28044 that they can be independently restored by the unwinder. */
28045 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28047 int i, cr_off = info->ehcr_offset;
28049 for (i = 0; i < 8; i++)
28050 if (!call_used_regs[CR0_REGNO + i])
28052 rtx reg = gen_rtx_REG (SImode, 0);
28053 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28054 cr_off + frame_off));
28056 insn = emit_insn (gen_movsi_to_cr_one
28057 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28059 if (!exit_func && flag_shrink_wrap)
28061 add_reg_note (insn, REG_CFA_RESTORE,
28062 gen_rtx_REG (SImode, CR0_REGNO + i));
28064 RTX_FRAME_RELATED_P (insn) = 1;
28067 cr_off += reg_size;
28071 /* Get the old lr if we saved it. If we are restoring registers
28072 out-of-line, then the out-of-line routines can do this for us. */
28073 if (restore_lr && restoring_GPRs_inline)
28074 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28076 /* Get the old cr if we saved it. */
28077 if (info->cr_save_p)
28079 unsigned cr_save_regno = 12;
28081 if (!restoring_GPRs_inline)
28083 /* Ensure we don't use the register used by the out-of-line
28084 gpr register restore below. */
28085 bool lr = info->gp_save_offset + info->gp_size == 0;
28086 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28087 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28089 if (gpr_ptr_regno == 12)
28090 cr_save_regno = 11;
28091 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28093 else if (REGNO (frame_reg_rtx) == 12)
28094 cr_save_regno = 11;
28096 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28097 info->cr_save_offset + frame_off,
28098 exit_func);
28101 /* Set LR here to try to overlap restores below. */
28102 if (restore_lr && restoring_GPRs_inline)
28103 restore_saved_lr (0, exit_func);
28105 /* Load exception handler data registers, if needed. */
28106 if (crtl->calls_eh_return)
28108 unsigned int i, regno;
28110 if (TARGET_AIX)
28112 rtx reg = gen_rtx_REG (reg_mode, 2);
28113 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28114 frame_off + RS6000_TOC_SAVE_SLOT));
28117 for (i = 0; ; ++i)
28119 rtx mem;
28121 regno = EH_RETURN_DATA_REGNO (i);
28122 if (regno == INVALID_REGNUM)
28123 break;
28125 /* Note: possible use of r0 here to address SPE regs. */
28126 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28127 info->ehrd_offset + frame_off
28128 + reg_size * (int) i);
28130 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28134 /* Restore GPRs. This is done as a PARALLEL if we are using
28135 the load-multiple instructions. */
28136 if (TARGET_SPE_ABI
28137 && info->spe_64bit_regs_used
28138 && info->first_gp_reg_save != 32)
28140 /* Determine whether we can address all of the registers that need
28141 to be saved with an offset from frame_reg_rtx that fits in
28142 the small const field for SPE memory instructions. */
28143 int spe_regs_addressable
28144 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
28145 + reg_size * (32 - info->first_gp_reg_save - 1))
28146 && restoring_GPRs_inline);
28148 if (!spe_regs_addressable)
28150 int ool_adjust = 0;
28151 rtx old_frame_reg_rtx = frame_reg_rtx;
28152 /* Make r11 point to the start of the SPE save area. We worried about
28153 not clobbering it when we were saving registers in the prologue.
28154 There's no need to worry here because the static chain is passed
28155 anew to every function. */
28157 if (!restoring_GPRs_inline)
28158 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
28159 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28160 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
28161 GEN_INT (info->spe_gp_save_offset
28162 + frame_off
28163 - ool_adjust)));
28164 /* Keep the invariant that frame_reg_rtx + frame_off points
28165 at the top of the stack frame. */
28166 frame_off = -info->spe_gp_save_offset + ool_adjust;
28169 if (restoring_GPRs_inline)
28171 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
28173 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28174 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28176 rtx offset, addr, mem, reg;
28178 /* We're doing all this to ensure that the immediate offset
28179 fits into the immediate field of 'evldd'. */
28180 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
28182 offset = GEN_INT (spe_offset + reg_size * i);
28183 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
28184 mem = gen_rtx_MEM (V2SImode, addr);
28185 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28187 emit_move_insn (reg, mem);
28190 else
28191 rs6000_emit_savres_rtx (info, frame_reg_rtx,
28192 info->spe_gp_save_offset + frame_off,
28193 info->lr_save_offset + frame_off,
28194 reg_mode,
28195 SAVRES_GPR | SAVRES_LR);
28197 else if (!restoring_GPRs_inline)
28199 /* We are jumping to an out-of-line function. */
28200 rtx ptr_reg;
28201 int end_save = info->gp_save_offset + info->gp_size;
28202 bool can_use_exit = end_save == 0;
28203 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28204 int ptr_off;
28206 /* Emit stack reset code if we need it. */
28207 ptr_regno = ptr_regno_for_savres (sel);
28208 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28209 if (can_use_exit)
28210 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28211 else if (end_save + frame_off != 0)
28212 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28213 GEN_INT (end_save + frame_off)));
28214 else if (REGNO (frame_reg_rtx) != ptr_regno)
28215 emit_move_insn (ptr_reg, frame_reg_rtx);
28216 if (REGNO (frame_reg_rtx) == ptr_regno)
28217 frame_off = -end_save;
28219 if (can_use_exit && info->cr_save_p)
28220 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28222 ptr_off = -end_save;
28223 rs6000_emit_savres_rtx (info, ptr_reg,
28224 info->gp_save_offset + ptr_off,
28225 info->lr_save_offset + ptr_off,
28226 reg_mode, sel);
28228 else if (using_load_multiple)
28230 rtvec p;
28231 p = rtvec_alloc (32 - info->first_gp_reg_save);
28232 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28233 RTVEC_ELT (p, i)
28234 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28235 frame_reg_rtx,
28236 info->gp_save_offset + frame_off + reg_size * i);
28237 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28239 else
28241 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28242 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
28243 emit_insn (gen_frame_load
28244 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28245 frame_reg_rtx,
28246 info->gp_save_offset + frame_off + reg_size * i));
28249 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28251 /* If the frame pointer was used then we can't delay emitting
28252 a REG_CFA_DEF_CFA note. This must happen on the insn that
28253 restores the frame pointer, r31. We may have already emitted
28254 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28255 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28256 be harmless if emitted. */
28257 if (frame_pointer_needed)
28259 insn = get_last_insn ();
28260 add_reg_note (insn, REG_CFA_DEF_CFA,
28261 plus_constant (Pmode, frame_reg_rtx, frame_off));
28262 RTX_FRAME_RELATED_P (insn) = 1;
28265 /* Set up cfa_restores. We always need these when
28266 shrink-wrapping. If not shrink-wrapping then we only need
28267 the cfa_restore when the stack location is no longer valid.
28268 The cfa_restores must be emitted on or before the insn that
28269 invalidates the stack, and of course must not be emitted
28270 before the insn that actually does the restore. The latter
28271 is why it is a bad idea to emit the cfa_restores as a group
28272 on the last instruction here that actually does a restore:
28273 that insn may be reordered with respect to others doing
28274 restores. */
28275 if (flag_shrink_wrap
28276 && !restoring_GPRs_inline
28277 && info->first_fp_reg_save == 64)
28278 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28280 for (i = info->first_gp_reg_save; i < 32; i++)
28281 if (!restoring_GPRs_inline
28282 || using_load_multiple
28283 || rs6000_reg_live_or_pic_offset_p (i))
28285 rtx reg = gen_rtx_REG (reg_mode, i);
28287 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28291 if (!restoring_GPRs_inline
28292 && info->first_fp_reg_save == 64)
28294 /* We are jumping to an out-of-line function. */
28295 if (cfa_restores)
28296 emit_cfa_restores (cfa_restores);
28297 return;
28300 if (restore_lr && !restoring_GPRs_inline)
28302 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28303 restore_saved_lr (0, exit_func);
28306 /* Restore fpr's if we need to do it without calling a function. */
28307 if (restoring_FPRs_inline)
28308 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28309 if (save_reg_p (info->first_fp_reg_save + i))
28311 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28312 ? DFmode : SFmode),
28313 info->first_fp_reg_save + i);
28314 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28315 info->fp_save_offset + frame_off + 8 * i));
28316 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28317 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28320 /* If we saved cr, restore it here. Just those that were used. */
28321 if (info->cr_save_p)
28322 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28324 /* If this is V.4, unwind the stack pointer after all of the loads
28325 have been done, or set up r11 if we are restoring fp out of line. */
28326 ptr_regno = 1;
28327 if (!restoring_FPRs_inline)
28329 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28330 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28331 ptr_regno = ptr_regno_for_savres (sel);
28334 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
28335 if (REGNO (frame_reg_rtx) == ptr_regno)
28336 frame_off = 0;
28338 if (insn && restoring_FPRs_inline)
28340 if (cfa_restores)
28342 REG_NOTES (insn) = cfa_restores;
28343 cfa_restores = NULL_RTX;
28345 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28346 RTX_FRAME_RELATED_P (insn) = 1;
28349 if (crtl->calls_eh_return)
28351 rtx sa = EH_RETURN_STACKADJ_RTX;
28352 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28355 if (!sibcall)
28357 rtvec p;
28358 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28359 if (! restoring_FPRs_inline)
28361 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
28362 RTVEC_ELT (p, 0) = ret_rtx;
28364 else
28366 if (cfa_restores)
28368 /* We can't hang the cfa_restores off a simple return,
28369 since the shrink-wrap code sometimes uses an existing
28370 return. This means there might be a path from
28371 pre-prologue code to this return, and dwarf2cfi code
28372 wants the eh_frame unwinder state to be the same on
28373 all paths to any point. So we need to emit the
28374 cfa_restores before the return. For -m64 we really
28375 don't need epilogue cfa_restores at all, except for
28376 this irritating dwarf2cfi with shrink-wrap
28377 requirement; the stack red-zone means eh_frame info
28378 from the prologue telling the unwinder to restore
28379 from the stack is perfectly good right to the end of
28380 the function. */
28381 emit_insn (gen_blockage ());
28382 emit_cfa_restores (cfa_restores);
28383 cfa_restores = NULL_RTX;
28385 p = rtvec_alloc (2);
28386 RTVEC_ELT (p, 0) = simple_return_rtx;
28389 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
28390 ? gen_rtx_USE (VOIDmode,
28391 gen_rtx_REG (Pmode, LR_REGNO))
28392 : gen_rtx_CLOBBER (VOIDmode,
28393 gen_rtx_REG (Pmode, LR_REGNO)));
28395 /* If we have to restore more than two FP registers, branch to the
28396 restore function. It will return to our caller. */
28397 if (! restoring_FPRs_inline)
28399 int i;
28400 int reg;
28401 rtx sym;
28403 if (flag_shrink_wrap)
28404 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28406 sym = rs6000_savres_routine_sym (info,
28407 SAVRES_FPR | (lr ? SAVRES_LR : 0));
28408 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
28409 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28410 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28412 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28414 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28416 RTVEC_ELT (p, i + 4)
28417 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28418 if (flag_shrink_wrap)
28419 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28420 cfa_restores);
28424 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28427 if (cfa_restores)
28429 if (sibcall)
28430 /* Ensure the cfa_restores are hung off an insn that won't
28431 be reordered above other restores. */
28432 emit_insn (gen_blockage ());
28434 emit_cfa_restores (cfa_restores);
28438 /* Write function epilogue. */
28440 static void
28441 rs6000_output_function_epilogue (FILE *file,
28442 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
28444 #if TARGET_MACHO
28445 macho_branch_islands ();
28446 /* Mach-O doesn't support labels at the end of objects, so if
28447 it looks like we might want one, insert a NOP. */
28449 rtx_insn *insn = get_last_insn ();
28450 rtx_insn *deleted_debug_label = NULL;
28451 while (insn
28452 && NOTE_P (insn)
28453 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28455 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28456 notes; instead set their CODE_LABEL_NUMBER to -1,
28457 otherwise there would be code generation differences
28458 between -g and -g0. */
28459 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28460 deleted_debug_label = insn;
28461 insn = PREV_INSN (insn);
28463 if (insn
28464 && (LABEL_P (insn)
28465 || (NOTE_P (insn)
28466 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
28467 fputs ("\tnop\n", file);
28468 else if (deleted_debug_label)
28469 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28470 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28471 CODE_LABEL_NUMBER (insn) = -1;
28473 #endif
28475 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28476 on its format.
28478 We don't output a traceback table if -finhibit-size-directive was
28479 used. The documentation for -finhibit-size-directive reads
28480 ``don't output a @code{.size} assembler directive, or anything
28481 else that would cause trouble if the function is split in the
28482 middle, and the two halves are placed at locations far apart in
28483 memory.'' The traceback table has this property, since it
28484 includes the offset from the start of the function to the
28485 traceback table itself.
28487 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28488 different traceback table. */
28489 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28490 && ! flag_inhibit_size_directive
28491 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28493 const char *fname = NULL;
28494 const char *language_string = lang_hooks.name;
28495 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28496 int i;
28497 int optional_tbtab;
28498 rs6000_stack_t *info = rs6000_stack_info ();
28500 if (rs6000_traceback == traceback_full)
28501 optional_tbtab = 1;
28502 else if (rs6000_traceback == traceback_part)
28503 optional_tbtab = 0;
28504 else
28505 optional_tbtab = !optimize_size && !TARGET_ELF;
28507 if (optional_tbtab)
28509 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28510 while (*fname == '.') /* V.4 encodes . in the name */
28511 fname++;
28513 /* Need label immediately before tbtab, so we can compute
28514 its offset from the function start. */
28515 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28516 ASM_OUTPUT_LABEL (file, fname);
28519 /* The .tbtab pseudo-op can only be used for the first eight
28520 expressions, since it can't handle the possibly variable
28521 length fields that follow. However, if you omit the optional
28522 fields, the assembler outputs zeros for all optional fields
28523 anyway, giving each variable length field its minimum length
28524 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28525 pseudo-op at all. */
28527 /* An all-zero word flags the start of the tbtab, for debuggers
28528 that have to find it by searching forward from the entry
28529 point or from the current pc. */
28530 fputs ("\t.long 0\n", file);
28532 /* Tbtab format type. Use format type 0. */
28533 fputs ("\t.byte 0,", file);
28535 /* Language type. Unfortunately, there does not seem to be any
28536 official way to discover the language being compiled, so we
28537 use language_string.
28538 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28539 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28540 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28541 either, so for now use 0. */
28542 if (lang_GNU_C ()
28543 || ! strcmp (language_string, "GNU GIMPLE")
28544 || ! strcmp (language_string, "GNU Go")
28545 || ! strcmp (language_string, "libgccjit"))
28546 i = 0;
28547 else if (! strcmp (language_string, "GNU F77")
28548 || lang_GNU_Fortran ())
28549 i = 1;
28550 else if (! strcmp (language_string, "GNU Pascal"))
28551 i = 2;
28552 else if (! strcmp (language_string, "GNU Ada"))
28553 i = 3;
28554 else if (lang_GNU_CXX ()
28555 || ! strcmp (language_string, "GNU Objective-C++"))
28556 i = 9;
28557 else if (! strcmp (language_string, "GNU Java"))
28558 i = 13;
28559 else if (! strcmp (language_string, "GNU Objective-C"))
28560 i = 14;
28561 else
28562 gcc_unreachable ();
28563 fprintf (file, "%d,", i);
28565 /* 8 single bit fields: global linkage (not set for C extern linkage,
28566 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28567 from start of procedure stored in tbtab, internal function, function
28568 has controlled storage, function has no toc, function uses fp,
28569 function logs/aborts fp operations. */
28570 /* Assume that fp operations are used if any fp reg must be saved. */
28571 fprintf (file, "%d,",
28572 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28574 /* 6 bitfields: function is interrupt handler, name present in
28575 proc table, function calls alloca, on condition directives
28576 (controls stack walks, 3 bits), saves condition reg, saves
28577 link reg. */
28578 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28579 set up as a frame pointer, even when there is no alloca call. */
28580 fprintf (file, "%d,",
28581 ((optional_tbtab << 6)
28582 | ((optional_tbtab & frame_pointer_needed) << 5)
28583 | (info->cr_save_p << 1)
28584 | (info->lr_save_p)));
28586 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28587 (6 bits). */
28588 fprintf (file, "%d,",
28589 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28591 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28592 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28594 if (optional_tbtab)
28596 /* Compute the parameter info from the function decl argument
28597 list. */
28598 tree decl;
28599 int next_parm_info_bit = 31;
28601 for (decl = DECL_ARGUMENTS (current_function_decl);
28602 decl; decl = DECL_CHAIN (decl))
28604 rtx parameter = DECL_INCOMING_RTL (decl);
28605 machine_mode mode = GET_MODE (parameter);
28607 if (GET_CODE (parameter) == REG)
28609 if (SCALAR_FLOAT_MODE_P (mode))
28611 int bits;
28613 float_parms++;
28615 switch (mode)
28617 case SFmode:
28618 case SDmode:
28619 bits = 0x2;
28620 break;
28622 case DFmode:
28623 case DDmode:
28624 case TFmode:
28625 case TDmode:
28626 case IFmode:
28627 case KFmode:
28628 bits = 0x3;
28629 break;
28631 default:
28632 gcc_unreachable ();
28635 /* If only one bit will fit, don't or in this entry. */
28636 if (next_parm_info_bit > 0)
28637 parm_info |= (bits << (next_parm_info_bit - 1));
28638 next_parm_info_bit -= 2;
28640 else
28642 fixed_parms += ((GET_MODE_SIZE (mode)
28643 + (UNITS_PER_WORD - 1))
28644 / UNITS_PER_WORD);
28645 next_parm_info_bit -= 1;
28651 /* Number of fixed point parameters. */
28652 /* This is actually the number of words of fixed point parameters; thus
28653 an 8-byte struct counts as 2, and thus the maximum value is 8. */
28654 fprintf (file, "%d,", fixed_parms);
28656 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28657 all on stack. */
28658 /* This is actually the number of fp registers that hold parameters;
28659 and thus the maximum value is 13. */
28660 /* Set parameters on stack bit if parameters are not in their original
28661 registers, regardless of whether they are on the stack? Xlc
28662 seems to set the bit when not optimizing. */
28663 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28665 if (! optional_tbtab)
28666 return;
28668 /* Optional fields follow. Some are variable length. */
28670 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
28671 11 double float. */
28672 /* There is an entry for each parameter in a register, in the order that
28673 they occur in the parameter list. Any intervening arguments on the
28674 stack are ignored. If the list overflows a long (max possible length
28675 34 bits) then completely leave off all elements that don't fit. */
28676 /* Only emit this long if there was at least one parameter. */
28677 if (fixed_parms || float_parms)
28678 fprintf (file, "\t.long %d\n", parm_info);
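/* Illustration: for a function taking (double, int) with both passed
   in registers, the loop above ends with float_parms == 1 and
   fixed_parms == 1, packing `11' for the double into bits 31-30 of
   parm_info and leaving bit 29 zero for the fixed-point word.  */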
28680 /* Offset from start of code to tb table. */
28681 fputs ("\t.long ", file);
28682 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28683 RS6000_OUTPUT_BASENAME (file, fname);
28684 putc ('-', file);
28685 rs6000_output_function_entry (file, fname);
28686 putc ('\n', file);
28688 /* Interrupt handler mask. */
28689 /* Omit this long, since we never set the interrupt handler bit
28690 above. */
28692 /* Number of CTL (controlled storage) anchors. */
28693 /* Omit this long, since the has_ctl bit is never set above. */
28695 /* Displacement into stack of each CTL anchor. */
28696 /* Omit this list of longs, because there are no CTL anchors. */
28698 /* Length of function name. */
28699 if (*fname == '*')
28700 ++fname;
28701 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28703 /* Function name. */
28704 assemble_string (fname, strlen (fname));
28706 /* Register for alloca automatic storage; this is always reg 31.
28707 Only emit this if the alloca bit was set above. */
28708 if (frame_pointer_needed)
28709 fputs ("\t.byte 31\n", file);
28711 fputs ("\t.align 2\n", file);
28714 /* Arrange to define .LCTOC1 label, if not already done. */
28715 if (need_toc_init)
28717 need_toc_init = 0;
28718 if (!toc_initialized)
28720 switch_to_section (toc_section);
28721 switch_to_section (current_function_section ());
28726 /* -fsplit-stack support. */
28728 /* A SYMBOL_REF for __morestack. */
28729 static GTY(()) rtx morestack_ref;
28731 static rtx
28732 gen_add3_const (rtx rt, rtx ra, long c)
28734 if (TARGET_64BIT)
28735 return gen_adddi3 (rt, ra, GEN_INT (c));
28736 else
28737 return gen_addsi3 (rt, ra, GEN_INT (c));
28740 /* Emit -fsplit-stack prologue, which goes before the regular function
28741 prologue (at local entry point in the case of ELFv2). */
28743 void
28744 rs6000_expand_split_stack_prologue (void)
28746 rs6000_stack_t *info = rs6000_stack_info ();
28747 unsigned HOST_WIDE_INT allocate;
28748 long alloc_hi, alloc_lo;
28749 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28750 rtx_insn *insn;
28752 gcc_assert (flag_split_stack && reload_completed);
28754 if (!info->push_p)
28755 return;
28757 if (global_regs[29])
28758 {
28759 error ("-fsplit-stack uses register r29");
28760 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28761 "conflicts with %qD", global_regs_decl[29]);
28762 }
28764 allocate = info->total_size;
28765 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28766 {
28767 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28768 return;
28769 }
28770 if (morestack_ref == NULL_RTX)
28772 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28773 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28774 | SYMBOL_FLAG_FUNCTION);
28777 r0 = gen_rtx_REG (Pmode, 0);
28778 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28779 r12 = gen_rtx_REG (Pmode, 12);
28780 emit_insn (gen_load_split_stack_limit (r0));
28781 /* Always emit two insns here to calculate the requested stack pointer,
28782 so that the linker can edit them when adjusting size for calling
28783 non-split-stack code. */
28784 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28785 alloc_lo = -allocate - alloc_hi;
28786 if (alloc_hi != 0)
28788 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28789 if (alloc_lo != 0)
28790 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28791 else
28792 emit_insn (gen_nop ());
28794 else
28796 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28797 emit_insn (gen_nop ());
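/* Worked example (a sketch): for allocate == 0x12345, -allocate is
   -74565; alloc_hi = (-74565 + 0x8000) & ~0xffff = -65536 and
   alloc_lo = -74565 - alloc_hi = -9029, so the pair emitted is

	addis 12,1,-1		-- r12 = r1 - 65536
	addi 12,12,-9029	-- r12 = r1 - 74565

   with each addend fitting a 16-bit signed immediate, as the linker
   editing mentioned above requires.  */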
28800 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28801 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28802 ok_label = gen_label_rtx ();
28803 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28804 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28805 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28806 pc_rtx);
28807 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28808 JUMP_LABEL (jump) = ok_label;
28809 /* Mark the jump as very likely to be taken. */
28810 add_int_reg_note (jump, REG_BR_PROB,
28811 REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100);
28813 lr = gen_rtx_REG (Pmode, LR_REGNO);
28814 insn = emit_move_insn (r0, lr);
28815 RTX_FRAME_RELATED_P (insn) = 1;
28816 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28817 RTX_FRAME_RELATED_P (insn) = 1;
28819 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28820 const0_rtx, const0_rtx));
28821 call_fusage = NULL_RTX;
28822 use_reg (&call_fusage, r12);
28823 /* Say the call uses r0, even though it doesn't, to stop regrename
28824 from twiddling with the insns saving lr, trashing args for cfun.
28825 The insns restoring lr are similarly protected by making
28826 split_stack_return use r0. */
28827 use_reg (&call_fusage, r0);
28828 add_function_usage_to (insn, call_fusage);
28829 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28830 insn = emit_move_insn (lr, r0);
28831 add_reg_note (insn, REG_CFA_RESTORE, lr);
28832 RTX_FRAME_RELATED_P (insn) = 1;
28833 emit_insn (gen_split_stack_return ());
28835 emit_label (ok_label);
28836 LABEL_NUSES (ok_label) = 1;
28839 /* Return the internal arg pointer used for function incoming
28840 arguments. When -fsplit-stack, the arg pointer is r12 so we need
28841 to copy it to a pseudo in order for it to be preserved over calls
28842 and suchlike. We'd really like to use a pseudo here for the
28843 internal arg pointer but data-flow analysis is not prepared to
28844 accept pseudos as live at the beginning of a function. */
28846 static rtx
28847 rs6000_internal_arg_pointer (void)
28849 if (flag_split_stack
28850 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28851 == NULL))
28854 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28856 rtx pat;
28858 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28859 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28861 /* Put the pseudo initialization right after the note at the
28862 beginning of the function. */
28863 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28864 gen_rtx_REG (Pmode, 12));
28865 push_topmost_sequence ();
28866 emit_insn_after (pat, get_insns ());
28867 pop_topmost_sequence ();
28869 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28870 FIRST_PARM_OFFSET (current_function_decl));
28872 return virtual_incoming_args_rtx;
28875 /* We may have to tell the dataflow pass that the split stack prologue
28876 is initializing a register. */
28878 static void
28879 rs6000_live_on_entry (bitmap regs)
28881 if (flag_split_stack)
28882 bitmap_set_bit (regs, 12);
28885 /* Emit -fsplit-stack dynamic stack allocation space check. */
28887 void
28888 rs6000_split_stack_space_check (rtx size, rtx label)
28890 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28891 rtx limit = gen_reg_rtx (Pmode);
28892 rtx requested = gen_reg_rtx (Pmode);
28893 rtx cmp = gen_reg_rtx (CCUNSmode);
28894 rtx jump;
28896 emit_insn (gen_load_split_stack_limit (limit));
28897 if (CONST_INT_P (size))
28898 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
28899 else
28901 size = force_reg (Pmode, size);
28902 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
28904 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
28905 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28906 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
28907 gen_rtx_LABEL_REF (VOIDmode, label),
28908 pc_rtx);
28909 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28910 JUMP_LABEL (jump) = label;
28913 /* A C compound statement that outputs the assembler code for a thunk
28914 function, used to implement C++ virtual function calls with
28915 multiple inheritance. The thunk acts as a wrapper around a virtual
28916 function, adjusting the implicit object parameter before handing
28917 control off to the real function.
28919 First, emit code to add the integer DELTA to the location that
28920 contains the incoming first argument. Assume that this argument
28921 contains a pointer, and is the one used to pass the `this' pointer
28922 in C++. This is the incoming argument *before* the function
28923 prologue, e.g. `%o0' on a sparc. The addition must preserve the
28924 values of all other incoming arguments.
28926 After the addition, emit code to jump to FUNCTION, which is a
28927 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
28928 not touch the return address. Hence returning from FUNCTION will
28929 return to whoever called the current `thunk'.
28931 The effect must be as if FUNCTION had been called directly with the
28932 adjusted first argument. This macro is responsible for emitting
28933 all of the code for a thunk function; output_function_prologue()
28934 and output_function_epilogue() are not invoked.
28936 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
28937 been extracted from it.) It might possibly be useful on some
28938 targets, but probably not.
28940 If you do not define this macro, the target-independent code in the
28941 C++ frontend will generate a less efficient heavyweight thunk that
28942 calls FUNCTION instead of jumping to it. The generic approach does
28943 not support varargs. */
28945 static void
28946 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
28947 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
28948 tree function)
28950 rtx this_rtx, funexp;
28951 rtx_insn *insn;
28953 reload_completed = 1;
28954 epilogue_completed = 1;
28956 /* Mark the end of the (empty) prologue. */
28957 emit_note (NOTE_INSN_PROLOGUE_END);
28959 /* Find the "this" pointer. If the function returns a structure,
28960 the structure return pointer is in r3. */
28961 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
28962 this_rtx = gen_rtx_REG (Pmode, 4);
28963 else
28964 this_rtx = gen_rtx_REG (Pmode, 3);
28966 /* Apply the constant offset, if required. */
28967 if (delta)
28968 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
28970 /* Apply the offset from the vtable, if required. */
28971 if (vcall_offset)
28973 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
28974 rtx tmp = gen_rtx_REG (Pmode, 12);
28976 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
28977 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
28979 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
28980 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
28982 else
28984 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
28986 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
28988 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
28991 /* Generate a tail call to the target function. */
28992 if (!TREE_USED (function))
28994 assemble_external (function);
28995 TREE_USED (function) = 1;
28997 funexp = XEXP (DECL_RTL (function), 0);
28998 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29000 #if TARGET_MACHO
29001 if (MACHOPIC_INDIRECT)
29002 funexp = machopic_indirect_call_target (funexp);
29003 #endif
29005 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29006 generate sibcall RTL explicitly. */
29007 insn = emit_call_insn (
29008 gen_rtx_PARALLEL (VOIDmode,
29009 gen_rtvec (4,
29010 gen_rtx_CALL (VOIDmode,
29011 funexp, const0_rtx),
29012 gen_rtx_USE (VOIDmode, const0_rtx),
29013 gen_rtx_USE (VOIDmode,
29014 gen_rtx_REG (SImode,
29015 LR_REGNO)),
29016 simple_return_rtx)));
29017 SIBLING_CALL_P (insn) = 1;
29018 emit_barrier ();
29020 /* Run just enough of rest_of_compilation to get the insns emitted.
29021 There's not really enough bulk here to make other passes such as
29022 instruction scheduling worthwhile. Note that use_thunk calls
29023 assemble_start_function and assemble_end_function. */
29024 insn = get_insns ();
29025 shorten_branches (insn);
29026 final_start_function (insn, file, 1);
29027 final (insn, file, 1);
29028 final_end_function ();
29030 reload_completed = 0;
29031 epilogue_completed = 0;
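/* A rough picture of the output (editorial sketch, not exact compiler
   output): with DELTA == 8 and VCALL_OFFSET == -16, the thunk body
   built above amounts, on a 64-bit target, to

	addi r3,r3,8        # this += delta
	ld   r12,0(r3)      # load the vtable pointer
	ld   r12,-16(r12)   # load the adjustment from the vtable
	add  r3,r3,r12      # apply it
	b    target         # sibcall; return address untouched

   The 32-bit variant uses lwz in place of ld.  */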
29034 /* A quick summary of the various types of 'constant-pool tables'
29035 under PowerPC:
29037 Target Flags Name One table per
29038 AIX (none) AIX TOC object file
29039 AIX -mfull-toc AIX TOC object file
29040 AIX -mminimal-toc AIX minimal TOC translation unit
29041 SVR4/EABI (none) SVR4 SDATA object file
29042 SVR4/EABI -fpic SVR4 pic object file
29043 SVR4/EABI -fPIC SVR4 PIC translation unit
29044 SVR4/EABI -mrelocatable EABI TOC function
29045 SVR4/EABI -maix AIX TOC object file
29046 SVR4/EABI -maix -mminimal-toc
29047 AIX minimal TOC translation unit
29049 Name Reg. Set by entries contains:
29050 made by addrs? fp? sum?
29052 AIX TOC 2 crt0 as Y option option
29053 AIX minimal TOC 30 prolog gcc Y Y option
29054 SVR4 SDATA 13 crt0 gcc N Y N
29055 SVR4 pic 30 prolog ld Y not yet N
29056 SVR4 PIC 30 prolog gcc Y option option
29057 EABI TOC 30 prolog gcc Y option option
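/* For example (editorial sketch), a TOC entry for a symbol `foo' on
   AIX is emitted through output_toc below as

	LC..5:
		.tc foo[TC],foo

   whereas with -mminimal-toc, or on ELF, the same entry is just the
   bare pointer in the private table:

	.LC5:
		.quad foo      # .long on a 32-bit target

   The label numbers are made up for the sketch.  */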
29061 /* Hash functions for the hash table. */
29063 static unsigned
29064 rs6000_hash_constant (rtx k)
29066 enum rtx_code code = GET_CODE (k);
29067 machine_mode mode = GET_MODE (k);
29068 unsigned result = (code << 3) ^ mode;
29069 const char *format;
29070 int flen, fidx;
29072 format = GET_RTX_FORMAT (code);
29073 flen = strlen (format);
29074 fidx = 0;
29076 switch (code)
29078 case LABEL_REF:
29079 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29081 case CONST_WIDE_INT:
29083 int i;
29084 flen = CONST_WIDE_INT_NUNITS (k);
29085 for (i = 0; i < flen; i++)
29086 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29087 return result;
29090 case CONST_DOUBLE:
29091 if (mode != VOIDmode)
29092 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29093 flen = 2;
29094 break;
29096 case CODE_LABEL:
29097 fidx = 3;
29098 break;
29100 default:
29101 break;
29104 for (; fidx < flen; fidx++)
29105 switch (format[fidx])
29107 case 's':
29109 unsigned i, len;
29110 const char *str = XSTR (k, fidx);
29111 len = strlen (str);
29112 result = result * 613 + len;
29113 for (i = 0; i < len; i++)
29114 result = result * 613 + (unsigned) str[i];
29115 break;
29117 case 'u':
29118 case 'e':
29119 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29120 break;
29121 case 'i':
29122 case 'n':
29123 result = result * 613 + (unsigned) XINT (k, fidx);
29124 break;
29125 case 'w':
29126 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29127 result = result * 613 + (unsigned) XWINT (k, fidx);
29128 else
29130 size_t i;
29131 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29132 result = result * 613 + (unsigned) (XWINT (k, fidx)
29133 >> CHAR_BIT * i);
29135 break;
29136 case '0':
29137 break;
29138 default:
29139 gcc_unreachable ();
29142 return result;
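/* Worked example (editorial): hashing (const_int 42) starts from
   result = (CONST_INT << 3) ^ VOIDmode; the single 'w' field then
   folds the HOST_WIDE_INT value into the hash, in 32-bit chunks via
   result * 613 + chunk on a host where unsigned is 32 bits and
   HOST_WIDE_INT is 64.  Equal constants thus hash equally, while the
   multipliers 613 and 1231 spread differing fields across the hash
   space.  */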
29145 hashval_t
29146 toc_hasher::hash (toc_hash_struct *thc)
29148 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29151 /* Compare H1 and H2 for equivalence. */
29153 bool
29154 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29156 rtx r1 = h1->key;
29157 rtx r2 = h2->key;
29159 if (h1->key_mode != h2->key_mode)
29160 return 0;
29162 return rtx_equal_p (r1, r2);
29165 /* These are the names given by the C++ front-end to vtables, and
29166 vtable-like objects. Ideally, this logic should not be here;
29167 instead, there should be some programmatic way of inquiring as
29168 to whether or not an object is a vtable. */
29170 #define VTABLE_NAME_P(NAME) \
29171 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29172 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29173 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29174 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29175 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29177 #ifdef NO_DOLLAR_IN_LABEL
29178 /* Return a GGC-allocated character string translating dollar signs in
29179 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29181 const char *
29182 rs6000_xcoff_strip_dollar (const char *name)
29184 char *strip, *p;
29185 const char *q;
29186 size_t len;
29188 q = (const char *) strchr (name, '$');
29190 if (q == 0 || q == name)
29191 return name;
29193 len = strlen (name);
29194 strip = XALLOCAVEC (char, len + 1);
29195 strcpy (strip, name);
29196 p = strip + (q - name);
29197 while (p)
29199 *p = '_';
29200 p = strchr (p + 1, '$');
29203 return ggc_alloc_string (strip, len);
29205 #endif
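/* Usage sketch (editorial): rs6000_xcoff_strip_dollar ("a$b$c")
   returns "a_b_c", while a name such as "$start" is returned
   unchanged, because its first '$' is at position 0 (q == name).  */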
29207 void
29208 rs6000_output_symbol_ref (FILE *file, rtx x)
29210 /* Currently C++ TOC references to vtables can be emitted before it
29211 is decided whether the vtable is public or private. If this is
29212 the case, then the linker will eventually complain that there is
29213 a reference to an unknown section. Thus, for vtables only,
29214 we emit the TOC reference to reference the symbol and not the
29215 section. */
29216 const char *name = XSTR (x, 0);
29218 tree decl = SYMBOL_REF_DECL (x);
29219 if (decl /* sync condition with assemble_external () */
29220 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
29221 && (TREE_CODE (decl) == VAR_DECL
29222 || TREE_CODE (decl) == FUNCTION_DECL)
29223 && name[strlen (name) - 1] != ']')
29225 name = concat (name,
29226 (TREE_CODE (decl) == FUNCTION_DECL
29227 ? "[DS]" : "[UA]"),
29228 NULL);
29229 XSTR (x, 0) = name;
29232 if (VTABLE_NAME_P (name))
29234 RS6000_OUTPUT_BASENAME (file, name);
29236 else
29237 assemble_name (file, name);
29240 /* Output a TOC entry. We derive the entry name from what is being
29241 written. */
29243 void
29244 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29246 char buf[256];
29247 const char *name = buf;
29248 rtx base = x;
29249 HOST_WIDE_INT offset = 0;
29251 gcc_assert (!TARGET_NO_TOC);
29253 /* When the linker won't eliminate them, don't output duplicate
29254 TOC entries (this happens on AIX if there is any kind of TOC,
29255 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29256 CODE_LABELs. */
29257 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29259 struct toc_hash_struct *h;
29261 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29262 time because GGC is not initialized at that point. */
29263 if (toc_hash_table == NULL)
29264 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29266 h = ggc_alloc<toc_hash_struct> ();
29267 h->key = x;
29268 h->key_mode = mode;
29269 h->labelno = labelno;
29271 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29272 if (*found == NULL)
29273 *found = h;
29274 else /* This is indeed a duplicate.
29275 Set this label equal to that label. */
29277 fputs ("\t.set ", file);
29278 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29279 fprintf (file, "%d,", labelno);
29280 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29281 fprintf (file, "%d\n", ((*found)->labelno));
29283 #ifdef HAVE_AS_TLS
29284 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29285 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29286 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29288 fputs ("\t.set ", file);
29289 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29290 fprintf (file, "%d,", labelno);
29291 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29292 fprintf (file, "%d\n", ((*found)->labelno));
29294 #endif
29295 return;
29299 /* If we're going to put a double constant in the TOC, make sure it's
29300 aligned properly when strict alignment is on. */
29301 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29302 && STRICT_ALIGNMENT
29303 && GET_MODE_BITSIZE (mode) >= 64
29304 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29305 ASM_OUTPUT_ALIGN (file, 3);
29308 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29310 /* Handle FP constants specially. Note that if we have a minimal
29311 TOC, things we put here aren't actually in the TOC, so we can allow
29312 FP constants. */
29313 if (GET_CODE (x) == CONST_DOUBLE
29314 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29315 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29317 long k[4];
29319 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29320 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29321 else
29322 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29324 if (TARGET_64BIT)
29326 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29327 fputs (DOUBLE_INT_ASM_OP, file);
29328 else
29329 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29330 k[0] & 0xffffffff, k[1] & 0xffffffff,
29331 k[2] & 0xffffffff, k[3] & 0xffffffff);
29332 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29333 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29334 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29335 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29336 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29337 return;
29339 else
29341 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29342 fputs ("\t.long ", file);
29343 else
29344 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29345 k[0] & 0xffffffff, k[1] & 0xffffffff,
29346 k[2] & 0xffffffff, k[3] & 0xffffffff);
29347 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29348 k[0] & 0xffffffff, k[1] & 0xffffffff,
29349 k[2] & 0xffffffff, k[3] & 0xffffffff);
29350 return;
29353 else if (GET_CODE (x) == CONST_DOUBLE
29354 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29356 long k[2];
29358 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29359 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29360 else
29361 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29363 if (TARGET_64BIT)
29365 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29366 fputs (DOUBLE_INT_ASM_OP, file);
29367 else
29368 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29369 k[0] & 0xffffffff, k[1] & 0xffffffff);
29370 fprintf (file, "0x%lx%08lx\n",
29371 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29372 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29373 return;
29375 else
29377 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29378 fputs ("\t.long ", file);
29379 else
29380 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29381 k[0] & 0xffffffff, k[1] & 0xffffffff);
29382 fprintf (file, "0x%lx,0x%lx\n",
29383 k[0] & 0xffffffff, k[1] & 0xffffffff);
29384 return;
29387 else if (GET_CODE (x) == CONST_DOUBLE
29388 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29390 long l;
29392 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29393 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29394 else
29395 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29397 if (TARGET_64BIT)
29399 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29400 fputs (DOUBLE_INT_ASM_OP, file);
29401 else
29402 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29403 if (WORDS_BIG_ENDIAN)
29404 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29405 else
29406 fprintf (file, "0x%lx\n", l & 0xffffffff);
29407 return;
29409 else
29411 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29412 fputs ("\t.long ", file);
29413 else
29414 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29415 fprintf (file, "0x%lx\n", l & 0xffffffff);
29416 return;
29419 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29421 unsigned HOST_WIDE_INT low;
29422 HOST_WIDE_INT high;
29424 low = INTVAL (x) & 0xffffffff;
29425 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29427 /* TOC entries are always Pmode-sized, so when big-endian
29428 smaller integer constants in the TOC need to be padded.
29429 (This is still a win over putting the constants in
29430 a separate constant pool, because then we'd have
29431 to have both a TOC entry _and_ the actual constant.)
29433 For a 32-bit target, CONST_INT values are loaded and shifted
29434 entirely within `low' and can be stored in one TOC entry. */
29436 /* It would be easy to make this work, but it doesn't now. */
29437 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29439 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29441 low |= high << 32;
29442 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29443 high = (HOST_WIDE_INT) low >> 32;
29444 low &= 0xffffffff;
29447 if (TARGET_64BIT)
29449 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29450 fputs (DOUBLE_INT_ASM_OP, file);
29451 else
29452 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29453 (long) high & 0xffffffff, (long) low & 0xffffffff);
29454 fprintf (file, "0x%lx%08lx\n",
29455 (long) high & 0xffffffff, (long) low & 0xffffffff);
29456 return;
29458 else
29460 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29462 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29463 fputs ("\t.long ", file);
29464 else
29465 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29466 (long) high & 0xffffffff, (long) low & 0xffffffff);
29467 fprintf (file, "0x%lx,0x%lx\n",
29468 (long) high & 0xffffffff, (long) low & 0xffffffff);
29470 else
29472 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29473 fputs ("\t.long ", file);
29474 else
29475 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29476 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29478 return;
29482 if (GET_CODE (x) == CONST)
29484 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29485 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29487 base = XEXP (XEXP (x, 0), 0);
29488 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29491 switch (GET_CODE (base))
29493 case SYMBOL_REF:
29494 name = XSTR (base, 0);
29495 break;
29497 case LABEL_REF:
29498 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29499 CODE_LABEL_NUMBER (XEXP (base, 0)));
29500 break;
29502 case CODE_LABEL:
29503 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29504 break;
29506 default:
29507 gcc_unreachable ();
29510 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29511 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29512 else
29514 fputs ("\t.tc ", file);
29515 RS6000_OUTPUT_BASENAME (file, name);
29517 if (offset < 0)
29518 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29519 else if (offset)
29520 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29522 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29523 after other TOC symbols, reducing overflow of small TOC access
29524 to [TC] symbols. */
29525 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29526 ? "[TE]," : "[TC],", file);
29529 /* Currently C++ TOC references to vtables can be emitted before it
29530 is decided whether the vtable is public or private. If this is
29531 the case, then the linker will eventually complain that there is
29532 a TOC reference to an unknown section. Thus, for vtables only,
29533 we emit the TOC reference to reference the symbol and not the
29534 section. */
29535 if (VTABLE_NAME_P (name))
29537 RS6000_OUTPUT_BASENAME (file, name);
29538 if (offset < 0)
29539 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29540 else if (offset > 0)
29541 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29543 else
29544 output_addr_const (file, x);
29546 #if HAVE_AS_TLS
29547 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29549 switch (SYMBOL_REF_TLS_MODEL (base))
29551 case 0:
29552 break;
29553 case TLS_MODEL_LOCAL_EXEC:
29554 fputs ("@le", file);
29555 break;
29556 case TLS_MODEL_INITIAL_EXEC:
29557 fputs ("@ie", file);
29558 break;
29559 /* Use global-dynamic for local-dynamic. */
29560 case TLS_MODEL_GLOBAL_DYNAMIC:
29561 case TLS_MODEL_LOCAL_DYNAMIC:
29562 putc ('\n', file);
29563 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29564 fputs ("\t.tc .", file);
29565 RS6000_OUTPUT_BASENAME (file, name);
29566 fputs ("[TC],", file);
29567 output_addr_const (file, x);
29568 fputs ("@m", file);
29569 break;
29570 default:
29571 gcc_unreachable ();
29574 #endif
29576 putc ('\n', file);
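/* Concrete example (editorial): for the DFmode constant 1.0 the
   64-bit AIX path above produces

	LC..7:
		.tc FD_3ff00000_0[TC],0x3ff0000000000000

   since REAL_VALUE_TO_TARGET_DOUBLE yields k[0] = 0x3ff00000 and
   k[1] = 0; the label number is made up for the sketch.  */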
29579 /* Output an assembler pseudo-op to write an ASCII string of N characters
29580 starting at P to FILE.
29582 On the RS/6000, we have to do this using the .byte operation and
29583 write out special characters outside the quoted string.
29584 Also, the assembler is broken; very long strings are truncated,
29585 so we must artificially break them up early. */
29587 void
29588 output_ascii (FILE *file, const char *p, int n)
29590 char c;
29591 int i, count_string;
29592 const char *for_string = "\t.byte \"";
29593 const char *for_decimal = "\t.byte ";
29594 const char *to_close = NULL;
29596 count_string = 0;
29597 for (i = 0; i < n; i++)
29599 c = *p++;
29600 if (c >= ' ' && c < 0177)
29602 if (for_string)
29603 fputs (for_string, file);
29604 putc (c, file);
29606 /* Write two quotes to get one. */
29607 if (c == '"')
29609 putc (c, file);
29610 ++count_string;
29613 for_string = NULL;
29614 for_decimal = "\"\n\t.byte ";
29615 to_close = "\"\n";
29616 ++count_string;
29618 if (count_string >= 512)
29620 fputs (to_close, file);
29622 for_string = "\t.byte \"";
29623 for_decimal = "\t.byte ";
29624 to_close = NULL;
29625 count_string = 0;
29628 else
29630 if (for_decimal)
29631 fputs (for_decimal, file);
29632 fprintf (file, "%d", c);
29634 for_string = "\n\t.byte \"";
29635 for_decimal = ", ";
29636 to_close = "\n";
29637 count_string = 0;
29641 /* Now close the string if we have written one. Then end the line. */
29642 if (to_close)
29643 fputs (to_close, file);
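/* Worked example (editorial): output_ascii (file, "Hi\n", 3) writes

	.byte "Hi"
	.byte 10

   Printable characters accumulate inside one quoted .byte directive,
   and the newline (decimal 10) is emitted as a separate .byte.  */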
29646 /* Generate a unique section name for FILENAME for a section type
29647 represented by SECTION_DESC. Output goes into BUF.
29649 SECTION_DESC can be any string, as long as it is different for each
29650 possible section type.
29652 We name the section in the same manner as xlc. The name begins with an
29653 underscore followed by the filename (after stripping any leading directory
29654 names) with the last period replaced by the string SECTION_DESC. If
29655 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29656 the name. */
29658 void
29659 rs6000_gen_section_name (char **buf, const char *filename,
29660 const char *section_desc)
29662 const char *q, *after_last_slash, *last_period = 0;
29663 char *p;
29664 int len;
29666 after_last_slash = filename;
29667 for (q = filename; *q; q++)
29669 if (*q == '/')
29670 after_last_slash = q + 1;
29671 else if (*q == '.')
29672 last_period = q;
29675 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29676 *buf = (char *) xmalloc (len);
29678 p = *buf;
29679 *p++ = '_';
29681 for (q = after_last_slash; *q; q++)
29683 if (q == last_period)
29685 strcpy (p, section_desc);
29686 p += strlen (section_desc);
29687 break;
29690 else if (ISALNUM (*q))
29691 *p++ = *q;
29694 if (last_period == 0)
29695 strcpy (p, section_desc);
29696 else
29697 *p = '\0';
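/* Worked example (editorial): rs6000_gen_section_name (&buf,
   "src/foo.c", "_bss_") yields "_foo_bss_": the directory prefix is
   dropped, the last period is replaced by the descriptor, and the
   trailing "c" after that period is discarded by the early break.  */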
29700 /* Emit profile function. */
29702 void
29703 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29705 /* Non-standard profiling for kernels, which just saves LR then calls
29706 _mcount without worrying about arg saves. The idea is to change
29707 the function prologue as little as possible as it isn't easy to
29708 account for arg save/restore code added just for _mcount. */
29709 if (TARGET_PROFILE_KERNEL)
29710 return;
29712 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29714 #ifndef NO_PROFILE_COUNTERS
29715 # define NO_PROFILE_COUNTERS 0
29716 #endif
29717 if (NO_PROFILE_COUNTERS)
29718 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29719 LCT_NORMAL, VOIDmode, 0);
29720 else
29722 char buf[30];
29723 const char *label_name;
29724 rtx fun;
29726 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29727 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29728 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29730 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29731 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
29734 else if (DEFAULT_ABI == ABI_DARWIN)
29736 const char *mcount_name = RS6000_MCOUNT;
29737 int caller_addr_regno = LR_REGNO;
29739 /* Be conservative and always set this, at least for now. */
29740 crtl->uses_pic_offset_table = 1;
29742 #if TARGET_MACHO
29743 /* For PIC code, set up a stub and collect the caller's address
29744 from r0, which is where the prologue puts it. */
29745 if (MACHOPIC_INDIRECT
29746 && crtl->uses_pic_offset_table)
29747 caller_addr_regno = 0;
29748 #endif
29749 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29750 LCT_NORMAL, VOIDmode, 1,
29751 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29755 /* Write function profiler code. */
29757 void
29758 output_function_profiler (FILE *file, int labelno)
29760 char buf[100];
29762 switch (DEFAULT_ABI)
29764 default:
29765 gcc_unreachable ();
29767 case ABI_V4:
29768 if (!TARGET_32BIT)
29770 warning (0, "no profiling of 64-bit code for this ABI");
29771 return;
29773 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29774 fprintf (file, "\tmflr %s\n", reg_names[0]);
29775 if (NO_PROFILE_COUNTERS)
29777 asm_fprintf (file, "\tstw %s,4(%s)\n",
29778 reg_names[0], reg_names[1]);
29780 else if (TARGET_SECURE_PLT && flag_pic)
29782 if (TARGET_LINK_STACK)
29784 char name[32];
29785 get_ppc476_thunk_name (name);
29786 asm_fprintf (file, "\tbl %s\n", name);
29788 else
29789 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29790 asm_fprintf (file, "\tstw %s,4(%s)\n",
29791 reg_names[0], reg_names[1]);
29792 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29793 asm_fprintf (file, "\taddis %s,%s,",
29794 reg_names[12], reg_names[12]);
29795 assemble_name (file, buf);
29796 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29797 assemble_name (file, buf);
29798 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29800 else if (flag_pic == 1)
29802 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29803 asm_fprintf (file, "\tstw %s,4(%s)\n",
29804 reg_names[0], reg_names[1]);
29805 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29806 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29807 assemble_name (file, buf);
29808 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29810 else if (flag_pic > 1)
29812 asm_fprintf (file, "\tstw %s,4(%s)\n",
29813 reg_names[0], reg_names[1]);
29814 /* Now, we need to get the address of the label. */
29815 if (TARGET_LINK_STACK)
29817 char name[32];
29818 get_ppc476_thunk_name (name);
29819 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29820 assemble_name (file, buf);
29821 fputs ("-.\n1:", file);
29822 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29823 asm_fprintf (file, "\taddi %s,%s,4\n",
29824 reg_names[11], reg_names[11]);
29826 else
29828 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29829 assemble_name (file, buf);
29830 fputs ("-.\n1:", file);
29831 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29833 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29834 reg_names[0], reg_names[11]);
29835 asm_fprintf (file, "\tadd %s,%s,%s\n",
29836 reg_names[0], reg_names[0], reg_names[11]);
29838 else
29840 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29841 assemble_name (file, buf);
29842 fputs ("@ha\n", file);
29843 asm_fprintf (file, "\tstw %s,4(%s)\n",
29844 reg_names[0], reg_names[1]);
29845 asm_fprintf (file, "\tla %s,", reg_names[0]);
29846 assemble_name (file, buf);
29847 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29850 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29851 fprintf (file, "\tbl %s%s\n",
29852 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29853 break;
29855 case ABI_AIX:
29856 case ABI_ELFv2:
29857 case ABI_DARWIN:
29858 /* Don't do anything, done in output_profile_hook (). */
29859 break;
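/* Illustrative only: for ABI_V4, non-PIC, with profile counters
   enabled, the branch above ends up emitting something like

	mflr  r0
	lis   r12,.LP3@ha
	stw   r0,4(r1)
	la    r0,.LP3@l(r12)
	bl    _mcount

   where .LP3 is the per-function counter label; the label number is
   invented for the sketch, the register choices follow the code.  */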
29865 /* The following variable holds the last issued insn. */
29867 static rtx last_scheduled_insn;
29869 /* The following variable helps to balance issuing of load and
29870 store instructions. */
29872 static int load_store_pendulum;
29874 /* Power4 load update and store update instructions are cracked into a
29875 load or store and an integer insn which are executed in the same cycle.
29876 Branches have their own dispatch slot which does not count against the
29877 GCC issue rate, but it changes the program flow so there are no other
29878 instructions to issue in this cycle. */
29880 static int
29881 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29883 last_scheduled_insn = insn;
29884 if (GET_CODE (PATTERN (insn)) == USE
29885 || GET_CODE (PATTERN (insn)) == CLOBBER)
29887 cached_can_issue_more = more;
29888 return cached_can_issue_more;
29891 if (insn_terminates_group_p (insn, current_group))
29893 cached_can_issue_more = 0;
29894 return cached_can_issue_more;
29898 /* An insn with no reservation that still reaches here: don't charge it against the issue rate. */
29898 if (recog_memoized (insn) < 0)
29899 return more;
29901 if (rs6000_sched_groups)
29903 if (is_microcoded_insn (insn))
29904 cached_can_issue_more = 0;
29905 else if (is_cracked_insn (insn))
29906 cached_can_issue_more = more > 2 ? more - 2 : 0;
29907 else
29908 cached_can_issue_more = more - 1;
29910 return cached_can_issue_more;
29913 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
29914 return 0;
29916 cached_can_issue_more = more - 1;
29917 return cached_can_issue_more;
29920 static int
29921 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
29923 int r = rs6000_variable_issue_1 (insn, more);
29924 if (verbose)
29925 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
29926 return r;
29929 /* Adjust the cost of a scheduling dependency. Return the new cost of
29930 the dependency of INSN on DEP_INSN described by LINK. COST is the current cost. */
29932 static int
29933 rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
29935 enum attr_type attr_type;
29937 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
29938 return cost;
29940 switch (REG_NOTE_KIND (link))
29942 case REG_DEP_TRUE:
29944 /* Data dependency; DEP_INSN writes a register that INSN reads
29945 some cycles later. */
29947 /* Separate a load from a narrower, dependent store. */
29948 if (rs6000_sched_groups
29949 && GET_CODE (PATTERN (insn)) == SET
29950 && GET_CODE (PATTERN (dep_insn)) == SET
29951 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
29952 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
29953 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
29954 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
29955 return cost + 14;
29957 attr_type = get_attr_type (insn);
29959 switch (attr_type)
29961 case TYPE_JMPREG:
29962 /* Tell the first scheduling pass about the latency between
29963 a mtctr and bctr (and mtlr and br/blr). The first
29964 scheduling pass will not know about this latency since
29965 the mtctr instruction, which has the latency associated
29966 to it, will be generated by reload. */
29967 return 4;
29968 case TYPE_BRANCH:
29969 /* Leave some extra cycles between a compare and its
29970 dependent branch, to inhibit expensive mispredicts. */
29971 if ((rs6000_cpu_attr == CPU_PPC603
29972 || rs6000_cpu_attr == CPU_PPC604
29973 || rs6000_cpu_attr == CPU_PPC604E
29974 || rs6000_cpu_attr == CPU_PPC620
29975 || rs6000_cpu_attr == CPU_PPC630
29976 || rs6000_cpu_attr == CPU_PPC750
29977 || rs6000_cpu_attr == CPU_PPC7400
29978 || rs6000_cpu_attr == CPU_PPC7450
29979 || rs6000_cpu_attr == CPU_PPCE5500
29980 || rs6000_cpu_attr == CPU_PPCE6500
29981 || rs6000_cpu_attr == CPU_POWER4
29982 || rs6000_cpu_attr == CPU_POWER5
29983 || rs6000_cpu_attr == CPU_POWER7
29984 || rs6000_cpu_attr == CPU_POWER8
29985 || rs6000_cpu_attr == CPU_POWER9
29986 || rs6000_cpu_attr == CPU_CELL)
29987 && recog_memoized (dep_insn)
29988 && (INSN_CODE (dep_insn) >= 0))
29990 switch (get_attr_type (dep_insn))
29992 case TYPE_CMP:
29993 case TYPE_FPCOMPARE:
29994 case TYPE_CR_LOGICAL:
29995 case TYPE_DELAYED_CR:
29996 return cost + 2;
29997 case TYPE_EXTS:
29998 case TYPE_MUL:
29999 if (get_attr_dot (dep_insn) == DOT_YES)
30000 return cost + 2;
30001 else
30002 break;
30003 case TYPE_SHIFT:
30004 if (get_attr_dot (dep_insn) == DOT_YES
30005 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30006 return cost + 2;
30007 else
30008 break;
30009 default:
30010 break;
30012 break;
30014 case TYPE_STORE:
30015 case TYPE_FPSTORE:
30016 if ((rs6000_cpu == PROCESSOR_POWER6)
30017 && recog_memoized (dep_insn)
30018 && (INSN_CODE (dep_insn) >= 0))
30021 if (GET_CODE (PATTERN (insn)) != SET)
30022 /* If this happens, we have to extend this to schedule
30023 optimally. Return default for now. */
30024 return cost;
30026 /* Adjust the cost for the case where the value written
30027 by a fixed point operation is used as the address
30028 gen value on a store. */
30029 switch (get_attr_type (dep_insn))
30031 case TYPE_LOAD:
30032 case TYPE_CNTLZ:
30034 if (! store_data_bypass_p (dep_insn, insn))
30035 return get_attr_sign_extend (dep_insn)
30036 == SIGN_EXTEND_YES ? 6 : 4;
30037 break;
30039 case TYPE_SHIFT:
30041 if (! store_data_bypass_p (dep_insn, insn))
30042 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30043 6 : 3;
30044 break;
30046 case TYPE_INTEGER:
30047 case TYPE_ADD:
30048 case TYPE_LOGICAL:
30049 case TYPE_EXTS:
30050 case TYPE_INSERT:
30052 if (! store_data_bypass_p (dep_insn, insn))
30053 return 3;
30054 break;
30056 case TYPE_STORE:
30057 case TYPE_FPLOAD:
30058 case TYPE_FPSTORE:
30060 if (get_attr_update (dep_insn) == UPDATE_YES
30061 && ! store_data_bypass_p (dep_insn, insn))
30062 return 3;
30063 break;
30065 case TYPE_MUL:
30067 if (! store_data_bypass_p (dep_insn, insn))
30068 return 17;
30069 break;
30071 case TYPE_DIV:
30073 if (! store_data_bypass_p (dep_insn, insn))
30074 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30075 break;
30077 default:
30078 break;
30081 break;
30083 case TYPE_LOAD:
30084 if ((rs6000_cpu == PROCESSOR_POWER6)
30085 && recog_memoized (dep_insn)
30086 && (INSN_CODE (dep_insn) >= 0))
30089 /* Adjust the cost for the case where the value written
30090 by a fixed point instruction is used within the address
30091 gen portion of a subsequent load(u)(x). */
30092 switch (get_attr_type (dep_insn))
30094 case TYPE_LOAD:
30095 case TYPE_CNTLZ:
30097 if (set_to_load_agen (dep_insn, insn))
30098 return get_attr_sign_extend (dep_insn)
30099 == SIGN_EXTEND_YES ? 6 : 4;
30100 break;
30102 case TYPE_SHIFT:
30104 if (set_to_load_agen (dep_insn, insn))
30105 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30106 6 : 3;
30107 break;
30109 case TYPE_INTEGER:
30110 case TYPE_ADD:
30111 case TYPE_LOGICAL:
30112 case TYPE_EXTS:
30113 case TYPE_INSERT:
30115 if (set_to_load_agen (dep_insn, insn))
30116 return 3;
30117 break;
30119 case TYPE_STORE:
30120 case TYPE_FPLOAD:
30121 case TYPE_FPSTORE:
30123 if (get_attr_update (dep_insn) == UPDATE_YES
30124 && set_to_load_agen (dep_insn, insn))
30125 return 3;
30126 break;
30128 case TYPE_MUL:
30130 if (set_to_load_agen (dep_insn, insn))
30131 return 17;
30132 break;
30134 case TYPE_DIV:
30136 if (set_to_load_agen (dep_insn, insn))
30137 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30138 break;
30140 default:
30141 break;
30144 break;
30146 case TYPE_FPLOAD:
30147 if ((rs6000_cpu == PROCESSOR_POWER6)
30148 && get_attr_update (insn) == UPDATE_NO
30149 && recog_memoized (dep_insn)
30150 && (INSN_CODE (dep_insn) >= 0)
30151 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30152 return 2;
30154 default:
30155 break;
30158 /* Fall out to return default cost. */
30160 break;
30162 case REG_DEP_OUTPUT:
30163 /* Output dependency; DEP_INSN writes a register that INSN writes some
30164 cycles later. */
30165 if ((rs6000_cpu == PROCESSOR_POWER6)
30166 && recog_memoized (dep_insn)
30167 && (INSN_CODE (dep_insn) >= 0))
30169 attr_type = get_attr_type (insn);
30171 switch (attr_type)
30173 case TYPE_FP:
30174 if (get_attr_type (dep_insn) == TYPE_FP)
30175 return 1;
30176 break;
30177 case TYPE_FPLOAD:
30178 if (get_attr_update (insn) == UPDATE_NO
30179 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30180 return 2;
30181 break;
30182 default:
30183 break;
30186 case REG_DEP_ANTI:
30187 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30188 cycles later. */
30189 return 0;
30191 default:
30192 gcc_unreachable ();
30195 return cost;
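/* Worked example (editorial): on POWER6, if DEP_INSN is a
   sign-extending load whose result feeds the address generation of a
   store INSN (a true dependence with no store-data bypass), the
   TYPE_STORE case above stretches the cost to 6 cycles; the same pair
   with a plain (non-extending) load is charged 4.  */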
30198 /* Debug version of rs6000_adjust_cost. */
30200 static int
30201 rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
30202 int cost)
30204 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
30206 if (ret != cost)
30208 const char *dep;
30210 switch (REG_NOTE_KIND (link))
30212 default: dep = "unknown dependency"; break;
30213 case REG_DEP_TRUE: dep = "data dependency"; break;
30214 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30215 case REG_DEP_ANTI: dep = "anti dependency"; break;
30218 fprintf (stderr,
30219 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30220 "%s, insn:\n", ret, cost, dep);
30222 debug_rtx (insn);
30225 return ret;
30228 /* Return true if INSN is microcoded,
30229 false otherwise. */
30231 static bool
30232 is_microcoded_insn (rtx_insn *insn)
30234 if (!insn || !NONDEBUG_INSN_P (insn)
30235 || GET_CODE (PATTERN (insn)) == USE
30236 || GET_CODE (PATTERN (insn)) == CLOBBER)
30237 return false;
30239 if (rs6000_cpu_attr == CPU_CELL)
30240 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30242 if (rs6000_sched_groups
30243 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30245 enum attr_type type = get_attr_type (insn);
30246 if ((type == TYPE_LOAD
30247 && get_attr_update (insn) == UPDATE_YES
30248 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30249 || ((type == TYPE_LOAD || type == TYPE_STORE)
30250 && get_attr_update (insn) == UPDATE_YES
30251 && get_attr_indexed (insn) == INDEXED_YES)
30252 || type == TYPE_MFCR)
30253 return true;
30256 return false;
30259 /* The function returns true if INSN is cracked into 2 instructions
30260 by the processor (and therefore occupies 2 issue slots). */
30262 static bool
30263 is_cracked_insn (rtx_insn *insn)
30265 if (!insn || !NONDEBUG_INSN_P (insn)
30266 || GET_CODE (PATTERN (insn)) == USE
30267 || GET_CODE (PATTERN (insn)) == CLOBBER)
30268 return false;
30270 if (rs6000_sched_groups
30271 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30273 enum attr_type type = get_attr_type (insn);
30274 if ((type == TYPE_LOAD
30275 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30276 && get_attr_update (insn) == UPDATE_NO)
30277 || (type == TYPE_LOAD
30278 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30279 && get_attr_update (insn) == UPDATE_YES
30280 && get_attr_indexed (insn) == INDEXED_NO)
30281 || (type == TYPE_STORE
30282 && get_attr_update (insn) == UPDATE_YES
30283 && get_attr_indexed (insn) == INDEXED_NO)
30284 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30285 && get_attr_update (insn) == UPDATE_YES)
30286 || type == TYPE_DELAYED_CR
30287 || (type == TYPE_EXTS
30288 && get_attr_dot (insn) == DOT_YES)
30289 || (type == TYPE_SHIFT
30290 && get_attr_dot (insn) == DOT_YES
30291 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30292 || (type == TYPE_MUL
30293 && get_attr_dot (insn) == DOT_YES)
30294 || type == TYPE_DIV
30295 || (type == TYPE_INSERT
30296 && get_attr_size (insn) == SIZE_32))
30297 return true;
30300 return false;
30303 /* The function returns true if INSN can be issued only from
30304 the branch slot. */
30306 static bool
30307 is_branch_slot_insn (rtx_insn *insn)
30309 if (!insn || !NONDEBUG_INSN_P (insn)
30310 || GET_CODE (PATTERN (insn)) == USE
30311 || GET_CODE (PATTERN (insn)) == CLOBBER)
30312 return false;
30314 if (rs6000_sched_groups)
30316 enum attr_type type = get_attr_type (insn);
30317 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30318 return true;
30319 return false;
30322 return false;
30325 /* Return true if OUT_INSN sets a value that is
30326 used in the address generation computation of IN_INSN. */
30327 static bool
30328 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30330 rtx out_set, in_set;
30332 /* For performance reasons, only handle the simple case where
30333 both insns are a single_set. */
30334 out_set = single_set (out_insn);
30335 if (out_set)
30337 in_set = single_set (in_insn);
30338 if (in_set)
30339 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30342 return false;
30345 /* Try to determine base/offset/size parts of the given MEM.
30346 Return true if successful, false if all the values couldn't
30347 be determined.
30349 This function only looks for REG or REG+CONST address forms.
30350 REG+REG address form will return false. */
30352 static bool
30353 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30354 HOST_WIDE_INT *size)
30356 rtx addr_rtx;
30357 if (MEM_SIZE_KNOWN_P (mem))
30358 *size = MEM_SIZE (mem);
30359 else
30360 return false;
30362 addr_rtx = (XEXP (mem, 0));
30363 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30364 addr_rtx = XEXP (addr_rtx, 1);
30366 *offset = 0;
30367 while (GET_CODE (addr_rtx) == PLUS
30368 && CONST_INT_P (XEXP (addr_rtx, 1)))
30370 *offset += INTVAL (XEXP (addr_rtx, 1));
30371 addr_rtx = XEXP (addr_rtx, 0);
30373 if (!REG_P (addr_rtx))
30374 return false;
30376 *base = addr_rtx;
30377 return true;
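/* Worked example (editorial): for a MEM such as
   (mem:DI (plus (reg r9) (const_int 16))) with a known size of 8,
   get_memref_parts returns base = r9, offset = 16, size = 8.  An
   indexed form like (plus (reg r9) (reg r10)) makes it return false,
   because the inner address is not a bare REG.  */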
30380 /* Return true if the target storage location of
30381 MEM1 is adjacent to the target storage location of MEM2. */
30384 static bool
30385 adjacent_mem_locations (rtx mem1, rtx mem2)
30387 rtx reg1, reg2;
30388 HOST_WIDE_INT off1, size1, off2, size2;
30390 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30391 && get_memref_parts (mem2, &reg2, &off2, &size2))
30392 return ((REGNO (reg1) == REGNO (reg2))
30393 && ((off1 + size1 == off2)
30394 || (off2 + size2 == off1)));
30396 return false;
30399 /* This function returns true if it can be determined that the two MEM
30400 locations overlap by at least 1 byte based on base reg/offset/size. */
30402 static bool
30403 mem_locations_overlap (rtx mem1, rtx mem2)
30405 rtx reg1, reg2;
30406 HOST_WIDE_INT off1, size1, off2, size2;
30408 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30409 && get_memref_parts (mem2, &reg2, &off2, &size2))
30410 return ((REGNO (reg1) == REGNO (reg2))
30411 && (((off1 <= off2) && (off1 + size1 > off2))
30412 || ((off2 <= off1) && (off2 + size2 > off1))));
30414 return false;
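/* Worked example (editorial): two accesses based on the same register
   with (offset, size) = (0, 8) and (8, 8) satisfy
   adjacent_mem_locations, since 0 + 8 == 8; with (0, 8) and (4, 4)
   they instead satisfy mem_locations_overlap, since 0 <= 4 and
   0 + 8 > 4.  */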
30417 /* A C statement (sans semicolon) to update the integer scheduling
30418 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30419 INSN earlier, reduce the priority to execute INSN later. Do not
30420 define this macro if you do not need to adjust the scheduling
30421 priorities of insns. */
30423 static int
30424 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30426 rtx load_mem, str_mem;
30427 /* On machines (like the 750) which have asymmetric integer units,
30428 where one integer unit can do multiply and divides and the other
30429 can't, reduce the priority of multiply/divide so it is scheduled
30430 before other integer operations. */
30432 #if 0
30433 if (! INSN_P (insn))
30434 return priority;
30436 if (GET_CODE (PATTERN (insn)) == USE)
30437 return priority;
30439 switch (rs6000_cpu_attr) {
30440 case CPU_PPC750:
30441 switch (get_attr_type (insn))
30443 default:
30444 break;
30446 case TYPE_MUL:
30447 case TYPE_DIV:
30448 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30449 priority, priority);
30450 if (priority >= 0 && priority < 0x01000000)
30451 priority >>= 3;
30452 break;
30455 #endif
30457 if (insn_must_be_first_in_group (insn)
30458 && reload_completed
30459 && current_sched_info->sched_max_insns_priority
30460 && rs6000_sched_restricted_insns_priority)
30463 /* Prioritize insns that can be dispatched only in the first
30464 dispatch slot. */
30465 if (rs6000_sched_restricted_insns_priority == 1)
30466 /* Attach highest priority to insn. This means that in
30467 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30468 precede 'priority' (critical path) considerations. */
30469 return current_sched_info->sched_max_insns_priority;
30470 else if (rs6000_sched_restricted_insns_priority == 2)
30471 /* Increase priority of insn by a minimal amount. This means that in
30472 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30473 considerations precede dispatch-slot restriction considerations. */
30474 return (priority + 1);
30477 if (rs6000_cpu == PROCESSOR_POWER6
30478 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30479 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30480 /* Attach highest priority to insn if the scheduler has just issued two
30481 stores and this instruction is a load, or two loads and this instruction
30482 is a store. Power6 wants loads and stores scheduled alternately
30483 when possible */
30484 return current_sched_info->sched_max_insns_priority;
30486 return priority;
30489 /* Return true if the instruction is nonpipelined on the Cell. */
30490 static bool
30491 is_nonpipeline_insn (rtx_insn *insn)
30493 enum attr_type type;
30494 if (!insn || !NONDEBUG_INSN_P (insn)
30495 || GET_CODE (PATTERN (insn)) == USE
30496 || GET_CODE (PATTERN (insn)) == CLOBBER)
30497 return false;
30499 type = get_attr_type (insn);
30500 if (type == TYPE_MUL
30501 || type == TYPE_DIV
30502 || type == TYPE_SDIV
30503 || type == TYPE_DDIV
30504 || type == TYPE_SSQRT
30505 || type == TYPE_DSQRT
30506 || type == TYPE_MFCR
30507 || type == TYPE_MFCRF
30508 || type == TYPE_MFJMPR)
30510 return true;
30512 return false;
30516 /* Return how many instructions the machine can issue per cycle. */
30518 static int
30519 rs6000_issue_rate (void)
30521 /* Unless scheduling for register pressure, use issue rate of 1 for
30522 first scheduling pass to decrease degradation. */
30523 if (!reload_completed && !flag_sched_pressure)
30524 return 1;
30526 switch (rs6000_cpu_attr) {
30527 case CPU_RS64A:
30528 case CPU_PPC601: /* ? */
30529 case CPU_PPC7450:
30530 return 3;
30531 case CPU_PPC440:
30532 case CPU_PPC603:
30533 case CPU_PPC750:
30534 case CPU_PPC7400:
30535 case CPU_PPC8540:
30536 case CPU_PPC8548:
30537 case CPU_CELL:
30538 case CPU_PPCE300C2:
30539 case CPU_PPCE300C3:
30540 case CPU_PPCE500MC:
30541 case CPU_PPCE500MC64:
30542 case CPU_PPCE5500:
30543 case CPU_PPCE6500:
30544 case CPU_TITAN:
30545 return 2;
30546 case CPU_PPC476:
30547 case CPU_PPC604:
30548 case CPU_PPC604E:
30549 case CPU_PPC620:
30550 case CPU_PPC630:
30551 return 4;
30552 case CPU_POWER4:
30553 case CPU_POWER5:
30554 case CPU_POWER6:
30555 case CPU_POWER7:
30556 return 5;
30557 case CPU_POWER8:
30558 case CPU_POWER9:
30559 return 7;
30560 default:
30561 return 1;
30565 /* Return how many instructions to look ahead for better insn
30566 scheduling. */
30568 static int
30569 rs6000_use_sched_lookahead (void)
30571 switch (rs6000_cpu_attr)
30573 case CPU_PPC8540:
30574 case CPU_PPC8548:
30575 return 4;
30577 case CPU_CELL:
30578 return (reload_completed ? 8 : 0);
30580 default:
30581 return 0;
30585 /* We are choosing insn from the ready queue. Return zero if INSN can be
30586 chosen. */
30587 static int
30588 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30590 if (ready_index == 0)
30591 return 0;
30593 if (rs6000_cpu_attr != CPU_CELL)
30594 return 0;
30596 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30598 if (!reload_completed
30599 || is_nonpipeline_insn (insn)
30600 || is_microcoded_insn (insn))
30601 return 1;
30603 return 0;
30606 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30607 and return true. */
30609 static bool
30610 find_mem_ref (rtx pat, rtx *mem_ref)
30612 const char * fmt;
30613 int i, j;
30615 /* stack_tie does not produce any real memory traffic. */
30616 if (tie_operand (pat, VOIDmode))
30617 return false;
30619 if (GET_CODE (pat) == MEM)
30621 *mem_ref = pat;
30622 return true;
30625 /* Recursively process the pattern. */
30626 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30628 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30630 if (fmt[i] == 'e')
30632 if (find_mem_ref (XEXP (pat, i), mem_ref))
30633 return true;
30635 else if (fmt[i] == 'E')
30636 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30638 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30639 return true;
30643 return false;
30646 /* Determine if PAT is a PATTERN of a load insn. */
30648 static bool
30649 is_load_insn1 (rtx pat, rtx *load_mem)
30651 if (!pat)
30652 return false;
30654 if (GET_CODE (pat) == SET)
30655 return find_mem_ref (SET_SRC (pat), load_mem);
30657 if (GET_CODE (pat) == PARALLEL)
30659 int i;
30661 for (i = 0; i < XVECLEN (pat, 0); i++)
30662 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30663 return true;
30666 return false;
30669 /* Determine if INSN loads from memory. */
30671 static bool
30672 is_load_insn (rtx insn, rtx *load_mem)
30674 if (!insn || !INSN_P (insn))
30675 return false;
30677 if (CALL_P (insn))
30678 return false;
30680 return is_load_insn1 (PATTERN (insn), load_mem);
30683 /* Determine if PAT is a PATTERN of a store insn. */
30685 static bool
30686 is_store_insn1 (rtx pat, rtx *str_mem)
30688 if (!pat)
30689 return false;
30691 if (GET_CODE (pat) == SET)
30692 return find_mem_ref (SET_DEST (pat), str_mem);
30694 if (GET_CODE (pat) == PARALLEL)
30696 int i;
30698 for (i = 0; i < XVECLEN (pat, 0); i++)
30699 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30700 return true;
30703 return false;
30706 /* Determine if INSN stores to memory. */
30708 static bool
30709 is_store_insn (rtx insn, rtx *str_mem)
30711 if (!insn || !INSN_P (insn))
30712 return false;
30714 return is_store_insn1 (PATTERN (insn), str_mem);
30717 /* Returns whether the dependence between INSN and NEXT is considered
30718 costly by the given target. */
30720 static bool
30721 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30723 rtx insn;
30724 rtx next;
30725 rtx load_mem, str_mem;
30727 /* If the flag is not enabled - no dependence is considered costly;
30728 allow all dependent insns in the same group.
30729 This is the most aggressive option. */
30730 if (rs6000_sched_costly_dep == no_dep_costly)
30731 return false;
30733 /* If the flag is set to 1 - a dependence is always considered costly;
30734 do not allow dependent instructions in the same group.
30735 This is the most conservative option. */
30736 if (rs6000_sched_costly_dep == all_deps_costly)
30737 return true;
30739 insn = DEP_PRO (dep);
30740 next = DEP_CON (dep);
30742 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30743 && is_load_insn (next, &load_mem)
30744 && is_store_insn (insn, &str_mem))
30745 /* Prevent load after store in the same group. */
30746 return true;
30748 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30749 && is_load_insn (next, &load_mem)
30750 && is_store_insn (insn, &str_mem)
30751 && DEP_TYPE (dep) == REG_DEP_TRUE
30752 && mem_locations_overlap(str_mem, load_mem))
30753 /* Prevent load after store in the same group if it is a true
30754 dependence. */
30755 return true;
30757 /* The flag is set to X; dependences with latency >= X are considered costly,
30758 and will not be scheduled in the same group. */
30759 if (rs6000_sched_costly_dep <= max_dep_latency
30760 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30761 return true;
30763 return false;
30766 /* Return the next insn after INSN that is found before TAIL is reached,
30767 skipping any "non-active" insns - insns that will not actually occupy
30768 an issue slot. Return NULL_RTX if such an insn is not found. */
30770 static rtx_insn *
30771 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30773 if (insn == NULL_RTX || insn == tail)
30774 return NULL;
30776 while (1)
30778 insn = NEXT_INSN (insn);
30779 if (insn == NULL_RTX || insn == tail)
30780 return NULL;
30782 if (CALL_P (insn)
30783 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30784 || (NONJUMP_INSN_P (insn)
30785 && GET_CODE (PATTERN (insn)) != USE
30786 && GET_CODE (PATTERN (insn)) != CLOBBER
30787 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30788 break;
30790 return insn;
30793 /* We are about to begin issuing insns for this clock cycle. */
30795 static int
30796 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
30797 rtx_insn **ready ATTRIBUTE_UNUSED,
30798 int *pn_ready ATTRIBUTE_UNUSED,
30799 int clock_var ATTRIBUTE_UNUSED)
30801 int n_ready = *pn_ready;
30803 if (sched_verbose)
30804 fprintf (dump, "// rs6000_sched_reorder :\n");
30806 /* Reorder the ready list if the second to last ready insn
30807 is a non-pipelined insn. */
30808 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
30810 if (is_nonpipeline_insn (ready[n_ready - 1])
30811 && (recog_memoized (ready[n_ready - 2]) > 0))
30812 /* Simply swap first two insns. */
30813 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
30816 if (rs6000_cpu == PROCESSOR_POWER6)
30817 load_store_pendulum = 0;
30819 return rs6000_issue_rate ();
30822 /* Like rs6000_sched_reorder, but called after issuing each insn. */
30824 static int
30825 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
30826 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
30828 if (sched_verbose)
30829 fprintf (dump, "// rs6000_sched_reorder2 :\n");
30831 /* For Power6, we need to handle some special cases to try and keep the
30832 store queue from overflowing and triggering expensive flushes.
30834 This code monitors how load and store instructions are being issued
30835 and skews the ready list one way or the other to increase the likelihood
30836 that a desired instruction is issued at the proper time.
30838 A couple of things are done. First, we maintain a "load_store_pendulum"
30839 to track the current state of load/store issue.
30841 - If the pendulum is at zero, then no loads or stores have been
30842 issued in the current cycle so we do nothing.
30844 - If the pendulum is 1, then a single load has been issued in this
30845 cycle and we attempt to locate another load in the ready list to
30846 issue with it.
30848 - If the pendulum is -2, then two stores have already been
30849 issued in this cycle, so we increase the priority of the first load
30850 in the ready list to increase its likelihood of being chosen first
30851 in the next cycle.
30853 - If the pendulum is -1, then a single store has been issued in this
30854 cycle and we attempt to locate another store in the ready list to
30855 issue with it, preferring a store to an adjacent memory location to
30856 facilitate store pairing in the store queue.
30858 - If the pendulum is 2, then two loads have already been
30859 issued in this cycle, so we increase the priority of the first store
30860 in the ready list to increase its likelihood of being chosen first
30861 in the next cycle.
30863 - If the pendulum < -2 or > 2, then do nothing.
30865 Note: This code covers the most common scenarios. There exist
30866 non-load/store instructions which make use of the LSU and which
30867 would need to be accounted for to strictly model the behavior
30868 of the machine. Those instructions are currently unaccounted
30869 for to help minimize compile time overhead of this code. */
30871 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
30873 int pos;
30874 int i;
30875 rtx_insn *tmp;
30876 rtx load_mem, str_mem;
30878 if (is_store_insn (last_scheduled_insn, &str_mem))
30879 /* Issuing a store, swing the load_store_pendulum to the left */
30880 load_store_pendulum--;
30881 else if (is_load_insn (last_scheduled_insn, &load_mem))
30882 /* Issuing a load, swing the load_store_pendulum to the right */
30883 load_store_pendulum++;
30884 else
30885 return cached_can_issue_more;
30887 /* If the pendulum is balanced, or there is only one instruction on
30888 the ready list, then all is well, so return. */
30889 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
30890 return cached_can_issue_more;
30892 if (load_store_pendulum == 1)
30894 /* A load has been issued in this cycle. Scan the ready list
30895 for another load to issue with it */
30896 pos = *pn_ready-1;
30898 while (pos >= 0)
30900 if (is_load_insn (ready[pos], &load_mem))
30902 /* Found a load. Move it to the head of the ready list,
30903 and adjust its priority so that it is more likely to
30904 stay there */
30905 tmp = ready[pos];
30906 for (i=pos; i<*pn_ready-1; i++)
30907 ready[i] = ready[i + 1];
30908 ready[*pn_ready-1] = tmp;
30910 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
30911 INSN_PRIORITY (tmp)++;
30912 break;
30914 pos--;
30917 else if (load_store_pendulum == -2)
30919 /* Two stores have been issued in this cycle. Increase the
30920 priority of the first load in the ready list to favor it for
30921 issuing in the next cycle. */
30922 pos = *pn_ready-1;
30924 while (pos >= 0)
30926 if (is_load_insn (ready[pos], &load_mem)
30927 && !sel_sched_p ()
30928 && INSN_PRIORITY_KNOWN (ready[pos]))
30930 INSN_PRIORITY (ready[pos])++;
30932 /* Adjust the pendulum to account for the fact that a load
30933 was found and increased in priority. This is to prevent
30934 increasing the priority of multiple loads */
30935 load_store_pendulum--;
30937 break;
30939 pos--;
30942 else if (load_store_pendulum == -1)
30944 /* A store has been issued in this cycle. Scan the ready list for
30945 another store to issue with it, preferring a store to an adjacent
30946 memory location */
30947 int first_store_pos = -1;
30949 pos = *pn_ready-1;
30951 while (pos >= 0)
30953 if (is_store_insn (ready[pos], &str_mem))
30955 rtx str_mem2;
30956 /* Maintain the index of the first store found on the
30957 list */
30958 if (first_store_pos == -1)
30959 first_store_pos = pos;
30961 if (is_store_insn (last_scheduled_insn, &str_mem2)
30962 && adjacent_mem_locations (str_mem, str_mem2))
30964 /* Found an adjacent store. Move it to the head of the
30965 ready list, and adjust its priority so that it is
30966 more likely to stay there */
30967 tmp = ready[pos];
30968 for (i=pos; i<*pn_ready-1; i++)
30969 ready[i] = ready[i + 1];
30970 ready[*pn_ready-1] = tmp;
30972 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
30973 INSN_PRIORITY (tmp)++;
30975 first_store_pos = -1;
30977 break;
30980 pos--;
30983 if (first_store_pos >= 0)
30985 /* An adjacent store wasn't found, but a non-adjacent store was,
30986 so move the non-adjacent store to the front of the ready
30987 list, and adjust its priority so that it is more likely to
30988 stay there. */
30989 tmp = ready[first_store_pos];
30990 for (i=first_store_pos; i<*pn_ready-1; i++)
30991 ready[i] = ready[i + 1];
30992 ready[*pn_ready-1] = tmp;
30993 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
30994 INSN_PRIORITY (tmp)++;
30997 else if (load_store_pendulum == 2)
30999 /* Two loads have been issued in this cycle. Increase the priority
31000 of the first store in the ready list to favor it for issuing in
31001 the next cycle. */
31002 pos = *pn_ready-1;
31004 while (pos >= 0)
31006 if (is_store_insn (ready[pos], &str_mem)
31007 && !sel_sched_p ()
31008 && INSN_PRIORITY_KNOWN (ready[pos]))
31010 INSN_PRIORITY (ready[pos])++;
31012 /* Adjust the pendulum to account for the fact that a store
31013 was found and increased in priority. This is to prevent
31014 increasing the priority of multiple stores */
31015 load_store_pendulum++;
31017 break;
31019 pos--;
31024 return cached_can_issue_more;
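/* A minimal standalone sketch (not part of GCC; guarded out) of the
   pendulum bookkeeping above, assuming a stream of 'L' (load) and 'S'
   (store) issue events.  It shows how the counter drifts toward the
   side that has issued more recently, which is what the reorder hook
   then tries to rebalance.  */
#if 0
#include <stdio.h>

static int
pendulum_step (int pendulum, char event)
{
  if (event == 'S')
    pendulum--;         /* a store swings the pendulum to the left */
  else if (event == 'L')
    pendulum++;         /* a load swings the pendulum to the right */
  return pendulum;
}

int
main (void)
{
  int pendulum = 0;
  const char *p;
  for (p = "LLSSL"; *p; p++)
    {
      pendulum = pendulum_step (pendulum, *p);
      printf ("after %c: pendulum = %d\n", *p, pendulum);
    }
  return 0;
}
#endif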
31027 /* Return whether the presence of INSN causes a dispatch group termination
31028 of group WHICH_GROUP.
31030 If WHICH_GROUP == current_group, this function will return true if INSN
31031 causes the termination of the current group (i.e., the dispatch group to
31032 which INSN belongs). This means that INSN will be the last insn in the
31033 group it belongs to.
31035 If WHICH_GROUP == previous_group, this function will return true if INSN
31036 causes the termination of the previous group (i.e., the dispatch group that
31037 precedes the group to which INSN belongs). This means that INSN will be
31038 the first insn in the group it belongs to. */
31040 static bool
31041 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31043 bool first, last;
31045 if (! insn)
31046 return false;
31048 first = insn_must_be_first_in_group (insn);
31049 last = insn_must_be_last_in_group (insn);
31051 if (first && last)
31052 return true;
31054 if (which_group == current_group)
31055 return last;
31056 else if (which_group == previous_group)
31057 return first;
31059 return false;
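/* For reference, the mapping implemented above, with FIRST and LAST
   denoting insn_must_be_first_in_group and insn_must_be_last_in_group:

     FIRST LAST | current_group  previous_group
       0     0  |    false           false
       0     1  |    true            false
       1     0  |    false           true
       1     1  |    true            true   */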
31063 static bool
31064 insn_must_be_first_in_group (rtx_insn *insn)
31066 enum attr_type type;
31068 if (!insn
31069 || NOTE_P (insn)
31070 || DEBUG_INSN_P (insn)
31071 || GET_CODE (PATTERN (insn)) == USE
31072 || GET_CODE (PATTERN (insn)) == CLOBBER)
31073 return false;
31075 switch (rs6000_cpu)
31077 case PROCESSOR_POWER5:
31078 if (is_cracked_insn (insn))
31079 return true;
31080 case PROCESSOR_POWER4:
31081 if (is_microcoded_insn (insn))
31082 return true;
31084 if (!rs6000_sched_groups)
31085 return false;
31087 type = get_attr_type (insn);
31089 switch (type)
31091 case TYPE_MFCR:
31092 case TYPE_MFCRF:
31093 case TYPE_MTCR:
31094 case TYPE_DELAYED_CR:
31095 case TYPE_CR_LOGICAL:
31096 case TYPE_MTJMPR:
31097 case TYPE_MFJMPR:
31098 case TYPE_DIV:
31099 case TYPE_LOAD_L:
31100 case TYPE_STORE_C:
31101 case TYPE_ISYNC:
31102 case TYPE_SYNC:
31103 return true;
31104 default:
31105 break;
31107 break;
31108 case PROCESSOR_POWER6:
31109 type = get_attr_type (insn);
31111 switch (type)
31113 case TYPE_EXTS:
31114 case TYPE_CNTLZ:
31115 case TYPE_TRAP:
31116 case TYPE_MUL:
31117 case TYPE_INSERT:
31118 case TYPE_FPCOMPARE:
31119 case TYPE_MFCR:
31120 case TYPE_MTCR:
31121 case TYPE_MFJMPR:
31122 case TYPE_MTJMPR:
31123 case TYPE_ISYNC:
31124 case TYPE_SYNC:
31125 case TYPE_LOAD_L:
31126 case TYPE_STORE_C:
31127 return true;
31128 case TYPE_SHIFT:
31129 if (get_attr_dot (insn) == DOT_NO
31130 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31131 return true;
31132 else
31133 break;
31134 case TYPE_DIV:
31135 if (get_attr_size (insn) == SIZE_32)
31136 return true;
31137 else
31138 break;
31139 case TYPE_LOAD:
31140 case TYPE_STORE:
31141 case TYPE_FPLOAD:
31142 case TYPE_FPSTORE:
31143 if (get_attr_update (insn) == UPDATE_YES)
31144 return true;
31145 else
31146 break;
31147 default:
31148 break;
31150 break;
31151 case PROCESSOR_POWER7:
31152 type = get_attr_type (insn);
31154 switch (type)
31156 case TYPE_CR_LOGICAL:
31157 case TYPE_MFCR:
31158 case TYPE_MFCRF:
31159 case TYPE_MTCR:
31160 case TYPE_DIV:
31161 case TYPE_ISYNC:
31162 case TYPE_LOAD_L:
31163 case TYPE_STORE_C:
31164 case TYPE_MFJMPR:
31165 case TYPE_MTJMPR:
31166 return true;
31167 case TYPE_MUL:
31168 case TYPE_SHIFT:
31169 case TYPE_EXTS:
31170 if (get_attr_dot (insn) == DOT_YES)
31171 return true;
31172 else
31173 break;
31174 case TYPE_LOAD:
31175 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31176 || get_attr_update (insn) == UPDATE_YES)
31177 return true;
31178 else
31179 break;
31180 case TYPE_STORE:
31181 case TYPE_FPLOAD:
31182 case TYPE_FPSTORE:
31183 if (get_attr_update (insn) == UPDATE_YES)
31184 return true;
31185 else
31186 break;
31187 default:
31188 break;
31190 break;
31191 case PROCESSOR_POWER8:
31192 case PROCESSOR_POWER9:
31193 type = get_attr_type (insn);
31195 switch (type)
31197 case TYPE_CR_LOGICAL:
31198 case TYPE_DELAYED_CR:
31199 case TYPE_MFCR:
31200 case TYPE_MFCRF:
31201 case TYPE_MTCR:
31202 case TYPE_SYNC:
31203 case TYPE_ISYNC:
31204 case TYPE_LOAD_L:
31205 case TYPE_STORE_C:
31206 case TYPE_VECSTORE:
31207 case TYPE_MFJMPR:
31208 case TYPE_MTJMPR:
31209 return true;
31210 case TYPE_SHIFT:
31211 case TYPE_EXTS:
31212 case TYPE_MUL:
31213 if (get_attr_dot (insn) == DOT_YES)
31214 return true;
31215 else
31216 break;
31217 case TYPE_LOAD:
31218 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31219 || get_attr_update (insn) == UPDATE_YES)
31220 return true;
31221 else
31222 break;
31223 case TYPE_STORE:
31224 if (get_attr_update (insn) == UPDATE_YES
31225 && get_attr_indexed (insn) == INDEXED_YES)
31226 return true;
31227 else
31228 break;
31229 default:
31230 break;
31232 break;
31233 default:
31234 break;
31237 return false;
31240 static bool
31241 insn_must_be_last_in_group (rtx_insn *insn)
31243 enum attr_type type;
31245 if (!insn
31246 || NOTE_P (insn)
31247 || DEBUG_INSN_P (insn)
31248 || GET_CODE (PATTERN (insn)) == USE
31249 || GET_CODE (PATTERN (insn)) == CLOBBER)
31250 return false;
31252 switch (rs6000_cpu) {
31253 case PROCESSOR_POWER4:
31254 case PROCESSOR_POWER5:
31255 if (is_microcoded_insn (insn))
31256 return true;
31258 if (is_branch_slot_insn (insn))
31259 return true;
31261 break;
31262 case PROCESSOR_POWER6:
31263 type = get_attr_type (insn);
31265 switch (type)
31267 case TYPE_EXTS:
31268 case TYPE_CNTLZ:
31269 case TYPE_TRAP:
31270 case TYPE_MUL:
31271 case TYPE_FPCOMPARE:
31272 case TYPE_MFCR:
31273 case TYPE_MTCR:
31274 case TYPE_MFJMPR:
31275 case TYPE_MTJMPR:
31276 case TYPE_ISYNC:
31277 case TYPE_SYNC:
31278 case TYPE_LOAD_L:
31279 case TYPE_STORE_C:
31280 return true;
31281 case TYPE_SHIFT:
31282 if (get_attr_dot (insn) == DOT_NO
31283 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31284 return true;
31285 else
31286 break;
31287 case TYPE_DIV:
31288 if (get_attr_size (insn) == SIZE_32)
31289 return true;
31290 else
31291 break;
31292 default:
31293 break;
31295 break;
31296 case PROCESSOR_POWER7:
31297 type = get_attr_type (insn);
31299 switch (type)
31301 case TYPE_ISYNC:
31302 case TYPE_SYNC:
31303 case TYPE_LOAD_L:
31304 case TYPE_STORE_C:
31305 return true;
31306 case TYPE_LOAD:
31307 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31308 && get_attr_update (insn) == UPDATE_YES)
31309 return true;
31310 else
31311 break;
31312 case TYPE_STORE:
31313 if (get_attr_update (insn) == UPDATE_YES
31314 && get_attr_indexed (insn) == INDEXED_YES)
31315 return true;
31316 else
31317 break;
31318 default:
31319 break;
31321 break;
31322 case PROCESSOR_POWER8:
31323 case PROCESSOR_POWER9:
31324 type = get_attr_type (insn);
31326 switch (type)
31328 case TYPE_MFCR:
31329 case TYPE_MTCR:
31330 case TYPE_ISYNC:
31331 case TYPE_SYNC:
31332 case TYPE_LOAD_L:
31333 case TYPE_STORE_C:
31334 return true;
31335 case TYPE_LOAD:
31336 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31337 && get_attr_update (insn) == UPDATE_YES)
31338 return true;
31339 else
31340 break;
31341 case TYPE_STORE:
31342 if (get_attr_update (insn) == UPDATE_YES
31343 && get_attr_indexed (insn) == INDEXED_YES)
31344 return true;
31345 else
31346 break;
31347 default:
31348 break;
31350 break;
31351 default:
31352 break;
31355 return false;
31358 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31359 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31361 static bool
31362 is_costly_group (rtx *group_insns, rtx next_insn)
31364 int i;
31365 int issue_rate = rs6000_issue_rate ();
31367 for (i = 0; i < issue_rate; i++)
31369 sd_iterator_def sd_it;
31370 dep_t dep;
31371 rtx insn = group_insns[i];
31373 if (!insn)
31374 continue;
31376 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31378 rtx next = DEP_CON (dep);
31380 if (next == next_insn
31381 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31382 return true;
31386 return false;
31389 /* Utility of the function redefine_groups.
31390 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31391 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31392 to keep it "far" (in a separate group) from GROUP_INSNS, according
31393 to one of the following schemes, depending on the value of the flag
31394 -minsert_sched_nops = X:
31395 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31396 in order to force NEXT_INSN into a separate group.
31397 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31398 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31399 insertion (has a group just ended, how many vacant issue slots remain in the
31400 last group, and how many dispatch groups were encountered so far). */
31402 static int
31403 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31404 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31405 int *group_count)
31407 rtx nop;
31408 bool force;
31409 int issue_rate = rs6000_issue_rate ();
31410 bool end = *group_end;
31411 int i;
31413 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31414 return can_issue_more;
31416 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31417 return can_issue_more;
31419 force = is_costly_group (group_insns, next_insn);
31420 if (!force)
31421 return can_issue_more;
31423 if (sched_verbose > 6)
31424 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31425 *group_count ,can_issue_more);
31427 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31429 if (*group_end)
31430 can_issue_more = 0;
31432 /* Since only a branch can be issued in the last issue_slot, it is
31433 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31434 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31435 in this case the last nop will start a new group and the branch
31436 will be forced to the new group. */
31437 if (can_issue_more && !is_branch_slot_insn (next_insn))
31438 can_issue_more--;
31440 /* Do we have a special group ending nop? */
31441 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31442 || rs6000_cpu_attr == CPU_POWER8 || rs6000_cpu_attr == CPU_POWER9)
31444 nop = gen_group_ending_nop ();
31445 emit_insn_before (nop, next_insn);
31446 can_issue_more = 0;
31448 else
31449 while (can_issue_more > 0)
31451 nop = gen_nop ();
31452 emit_insn_before (nop, next_insn);
31453 can_issue_more--;
31456 *group_end = true;
31457 return 0;
31460 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31462 int n_nops = rs6000_sched_insert_nops;
31464 /* Nops can't be issued from the branch slot, so the effective
31465 issue_rate for nops is 'issue_rate - 1'. */
31466 if (can_issue_more == 0)
31467 can_issue_more = issue_rate;
31468 can_issue_more--;
31469 if (can_issue_more == 0)
31471 can_issue_more = issue_rate - 1;
31472 (*group_count)++;
31473 end = true;
31474 for (i = 0; i < issue_rate; i++)
31476 group_insns[i] = 0;
31480 while (n_nops > 0)
31482 nop = gen_nop ();
31483 emit_insn_before (nop, next_insn);
31484 if (can_issue_more == issue_rate - 1) /* new group begins */
31485 end = false;
31486 can_issue_more--;
31487 if (can_issue_more == 0)
31489 can_issue_more = issue_rate - 1;
31490 (*group_count)++;
31491 end = true;
31492 for (i = 0; i < issue_rate; i++)
31494 group_insns[i] = 0;
31497 n_nops--;
31500 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31501 can_issue_more++;
31503 /* Is next_insn going to start a new group? */
31504 *group_end
31505 = (end
31506 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31507 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31508 || (can_issue_more < issue_rate &&
31509 insn_terminates_group_p (next_insn, previous_group)));
31510 if (*group_end && end)
31511 (*group_count)--;
31513 if (sched_verbose > 6)
31514 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31515 *group_count, can_issue_more);
31516 return can_issue_more;
31519 return can_issue_more;
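/* A guarded sketch (not part of GCC) of the nop accounting used by
   scheme (2) above: nops cannot occupy the branch slot, so they are
   issued at an effective rate of 'issue_rate - 1' per group.  The
   helper returns how many groups the inserted nops close.  */
#if 0
static int
groups_closed_by_nops (int issue_rate, int can_issue_more, int n_nops)
{
  int groups = 0;

  if (can_issue_more == 0)
    can_issue_more = issue_rate;
  can_issue_more--;             /* reserve the branch slot */
  if (can_issue_more == 0)
    {
      can_issue_more = issue_rate - 1;
      groups++;
    }

  while (n_nops-- > 0)
    {
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          groups++;
        }
    }
  return groups;
}
#endif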
31522 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31523 with the dispatch groups that the processor dispatcher is expected to
31524 form in practice. It tries to achieve this synchronization by forcing the
31525 estimated processor grouping on the compiler (as opposed to the function
31526 'pad_groups', which tries to force the scheduler's grouping on the processor).
31528 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31529 examines the (estimated) dispatch groups that will be formed by the processor
31530 dispatcher. It marks these group boundaries to reflect the estimated
31531 processor grouping, overriding the grouping that the scheduler had marked.
31532 Depending on the value of the flag '-minsert-sched-nops' this function can
31533 force certain insns into separate groups or force a certain distance between
31534 them by inserting nops, for example, if there exists a "costly dependence"
31535 between the insns.
31537 The function estimates the group boundaries that the processor will form as
31538 follows: It keeps track of how many vacant issue slots are available after
31539 each insn. A subsequent insn will start a new group if one of the following
31540 4 cases applies:
31541 - no more vacant issue slots remain in the current dispatch group.
31542 - only the last issue slot, which is the branch slot, is vacant, but the next
31543 insn is not a branch.
31544 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31545 which means that a cracked insn (which occupies two issue slots) can't be
31546 issued in this group.
31547 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31548 start a new group. */
31550 static int
31551 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31552 rtx_insn *tail)
31554 rtx_insn *insn, *next_insn;
31555 int issue_rate;
31556 int can_issue_more;
31557 int slot, i;
31558 bool group_end;
31559 int group_count = 0;
31560 rtx *group_insns;
31562 /* Initialize. */
31563 issue_rate = rs6000_issue_rate ();
31564 group_insns = XALLOCAVEC (rtx, issue_rate);
31565 for (i = 0; i < issue_rate; i++)
31567 group_insns[i] = 0;
31569 can_issue_more = issue_rate;
31570 slot = 0;
31571 insn = get_next_active_insn (prev_head_insn, tail);
31572 group_end = false;
31574 while (insn != NULL_RTX)
31576 slot = (issue_rate - can_issue_more);
31577 group_insns[slot] = insn;
31578 can_issue_more =
31579 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31580 if (insn_terminates_group_p (insn, current_group))
31581 can_issue_more = 0;
31583 next_insn = get_next_active_insn (insn, tail);
31584 if (next_insn == NULL_RTX)
31585 return group_count + 1;
31587 /* Is next_insn going to start a new group? */
31588 group_end
31589 = (can_issue_more == 0
31590 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31591 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31592 || (can_issue_more < issue_rate &&
31593 insn_terminates_group_p (next_insn, previous_group)));
31595 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31596 next_insn, &group_end, can_issue_more,
31597 &group_count);
31599 if (group_end)
31601 group_count++;
31602 can_issue_more = 0;
31603 for (i = 0; i < issue_rate; i++)
31605 group_insns[i] = 0;
31609 if (GET_MODE (next_insn) == TImode && can_issue_more)
31610 PUT_MODE (next_insn, VOIDmode);
31611 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31612 PUT_MODE (next_insn, TImode);
31614 insn = next_insn;
31615 if (can_issue_more == 0)
31616 can_issue_more = issue_rate;
31617 } /* while */
31619 return group_count;
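/* The four boundary conditions above, factored into one predicate
   (a sketch, not part of GCC; the bool arguments stand in for the
   is_branch_slot_insn / is_cracked_insn / insn_terminates_group_p
   tests applied to the next insn).  */
#if 0
static bool
next_insn_starts_group_p (int can_issue_more, int issue_rate,
                          bool is_branch, bool is_cracked,
                          bool terminates_previous)
{
  return (can_issue_more == 0
          || (can_issue_more == 1 && !is_branch)
          || (can_issue_more <= 2 && is_cracked)
          || (can_issue_more < issue_rate && terminates_previous));
}
#endif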
31622 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31623 dispatch group boundaries that the scheduler had marked. Pad with nops
31624 any dispatch groups which have vacant issue slots, in order to force the
31625 scheduler's grouping on the processor dispatcher. The function
31626 returns the number of dispatch groups found. */
31628 static int
31629 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31630 rtx_insn *tail)
31632 rtx_insn *insn, *next_insn;
31633 rtx nop;
31634 int issue_rate;
31635 int can_issue_more;
31636 int group_end;
31637 int group_count = 0;
31639 /* Initialize issue_rate. */
31640 issue_rate = rs6000_issue_rate ();
31641 can_issue_more = issue_rate;
31643 insn = get_next_active_insn (prev_head_insn, tail);
31644 next_insn = get_next_active_insn (insn, tail);
31646 while (insn != NULL_RTX)
31648 can_issue_more =
31649 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31651 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31653 if (next_insn == NULL_RTX)
31654 break;
31656 if (group_end)
31658 /* If the scheduler had marked group termination at this location
31659 (between insn and next_insn), and neither insn nor next_insn will
31660 force group termination, pad the group with nops to force group
31661 termination. */
31662 if (can_issue_more
31663 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31664 && !insn_terminates_group_p (insn, current_group)
31665 && !insn_terminates_group_p (next_insn, previous_group))
31667 if (!is_branch_slot_insn (next_insn))
31668 can_issue_more--;
31670 while (can_issue_more)
31672 nop = gen_nop ();
31673 emit_insn_before (nop, next_insn);
31674 can_issue_more--;
31678 can_issue_more = issue_rate;
31679 group_count++;
31682 insn = next_insn;
31683 next_insn = get_next_active_insn (insn, tail);
31686 return group_count;
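/* Nop count implied by the padding loop above: every vacant slot is
   filled, except that one fewer nop suffices when the next insn is
   not a branch, since a lone vacant branch slot already terminates
   the group.  (Sketch, not part of GCC.)  */
#if 0
static int
pad_nop_count (int vacant_slots, bool next_is_branch)
{
  return next_is_branch ? vacant_slots : vacant_slots - 1;
}
#endif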
31689 /* We're beginning a new block. Initialize data structures as necessary. */
31691 static void
31692 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31693 int sched_verbose ATTRIBUTE_UNUSED,
31694 int max_ready ATTRIBUTE_UNUSED)
31696 last_scheduled_insn = NULL_RTX;
31697 load_store_pendulum = 0;
31700 /* The following function is called at the end of scheduling BB.
31701 After reload, it inserts nops as needed for insn group bundling. */
31703 static void
31704 rs6000_sched_finish (FILE *dump, int sched_verbose)
31706 int n_groups;
31708 if (sched_verbose)
31709 fprintf (dump, "=== Finishing schedule.\n");
31711 if (reload_completed && rs6000_sched_groups)
31713 /* Do not run the sched_finish hook when selective scheduling is enabled. */
31714 if (sel_sched_p ())
31715 return;
31717 if (rs6000_sched_insert_nops == sched_finish_none)
31718 return;
31720 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
31721 n_groups = pad_groups (dump, sched_verbose,
31722 current_sched_info->prev_head,
31723 current_sched_info->next_tail);
31724 else
31725 n_groups = redefine_groups (dump, sched_verbose,
31726 current_sched_info->prev_head,
31727 current_sched_info->next_tail);
31729 if (sched_verbose >= 6)
31731 fprintf (dump, "ngroups = %d\n", n_groups);
31732 print_rtl (dump, current_sched_info->prev_head);
31733 fprintf (dump, "Done finish_sched\n");
31738 struct _rs6000_sched_context
31740 short cached_can_issue_more;
31741 rtx last_scheduled_insn;
31742 int load_store_pendulum;
31745 typedef struct _rs6000_sched_context rs6000_sched_context_def;
31746 typedef rs6000_sched_context_def *rs6000_sched_context_t;
31748 /* Allocate storage for a new scheduling context. */
31749 static void *
31750 rs6000_alloc_sched_context (void)
31752 return xmalloc (sizeof (rs6000_sched_context_def));
31755 /* If CLEAN_P is true, initialize _SC with clean data;
31756 otherwise initialize it from the global context. */
31757 static void
31758 rs6000_init_sched_context (void *_sc, bool clean_p)
31760 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31762 if (clean_p)
31764 sc->cached_can_issue_more = 0;
31765 sc->last_scheduled_insn = NULL_RTX;
31766 sc->load_store_pendulum = 0;
31768 else
31770 sc->cached_can_issue_more = cached_can_issue_more;
31771 sc->last_scheduled_insn = last_scheduled_insn;
31772 sc->load_store_pendulum = load_store_pendulum;
31776 /* Sets the global scheduling context to the one pointed to by _SC. */
31777 static void
31778 rs6000_set_sched_context (void *_sc)
31780 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31782 gcc_assert (sc != NULL);
31784 cached_can_issue_more = sc->cached_can_issue_more;
31785 last_scheduled_insn = sc->last_scheduled_insn;
31786 load_store_pendulum = sc->load_store_pendulum;
31789 /* Free _SC. */
31790 static void
31791 rs6000_free_sched_context (void *_sc)
31793 gcc_assert (_sc != NULL);
31795 free (_sc);
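/* How the selective scheduler is expected to drive the four hooks
   above, in sequence (an illustrative sketch, not the actual
   caller).  */
#if 0
static void
sched_context_round_trip (void)
{
  void *ctx = rs6000_alloc_sched_context ();

  /* Snapshot the current global scheduling state.  */
  rs6000_init_sched_context (ctx, /*clean_p=*/false);

  /* ... the scheduler explores another path; globals change ...  */

  /* Restore the snapshot and release it.  */
  rs6000_set_sched_context (ctx);
  rs6000_free_sched_context (ctx);
}
#endif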
31799 /* Length in units of the trampoline for entering a nested function. */
31801 int
31802 rs6000_trampoline_size (void)
31804 int ret = 0;
31806 switch (DEFAULT_ABI)
31808 default:
31809 gcc_unreachable ();
31811 case ABI_AIX:
31812 ret = (TARGET_32BIT) ? 12 : 24;
31813 break;
31815 case ABI_ELFv2:
31816 gcc_assert (!TARGET_32BIT);
31817 ret = 32;
31818 break;
31820 case ABI_DARWIN:
31821 case ABI_V4:
31822 ret = (TARGET_32BIT) ? 40 : 48;
31823 break;
31826 return ret;
31829 /* Emit RTL insns to initialize the variable parts of a trampoline.
31830 FNADDR is an RTX for the address of the function's pure code.
31831 CXT is an RTX for the static chain value for the function. */
31833 static void
31834 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
31836 int regsize = (TARGET_32BIT) ? 4 : 8;
31837 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
31838 rtx ctx_reg = force_reg (Pmode, cxt);
31839 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
31841 switch (DEFAULT_ABI)
31843 default:
31844 gcc_unreachable ();
31846 /* Under AIX, just build the 3-word function descriptor. */
31847 case ABI_AIX:
31849 rtx fnmem, fn_reg, toc_reg;
31851 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
31852 error ("You cannot take the address of a nested function if you use "
31853 "the -mno-pointers-to-nested-functions option.");
31855 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
31856 fn_reg = gen_reg_rtx (Pmode);
31857 toc_reg = gen_reg_rtx (Pmode);
31859 /* Macro to shorten the code expansions below. */
31860 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
31862 m_tramp = replace_equiv_address (m_tramp, addr);
31864 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
31865 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
31866 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
31867 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
31868 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
31870 # undef MEM_PLUS
31872 break;
31874 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
31875 case ABI_ELFv2:
31876 case ABI_DARWIN:
31877 case ABI_V4:
31878 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
31879 LCT_NORMAL, VOIDmode, 4,
31880 addr, Pmode,
31881 GEN_INT (rs6000_trampoline_size ()), SImode,
31882 fnaddr, Pmode,
31883 ctx_reg, Pmode);
31884 break;
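/* For reference, the ABI_AIX case above fills in a 3-word (32-bit) or
   3-doubleword (64-bit) function descriptor; laid out as a struct it
   would look like this (sketch, not part of GCC):  */
#if 0
struct aix_func_desc
{
  void *entry;          /* code address, copied from FNADDR's descriptor */
  void *toc;            /* TOC base, copied from the same descriptor */
  void *static_chain;   /* CXT, the static chain value */
};
#endif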
31889 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
31890 identifier as an argument, so the front end shouldn't look it up. */
31892 static bool
31893 rs6000_attribute_takes_identifier_p (const_tree attr_id)
31895 return is_attribute_p ("altivec", attr_id);
31898 /* Handle the "altivec" attribute. The attribute may have
31899 arguments as follows:
31901 __attribute__((altivec(vector__)))
31902 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
31903 __attribute__((altivec(bool__))) (always followed by 'unsigned')
31905 and may appear more than once (e.g., 'vector bool char') in a
31906 given declaration. */
31908 static tree
31909 rs6000_handle_altivec_attribute (tree *node,
31910 tree name ATTRIBUTE_UNUSED,
31911 tree args,
31912 int flags ATTRIBUTE_UNUSED,
31913 bool *no_add_attrs)
31915 tree type = *node, result = NULL_TREE;
31916 machine_mode mode;
31917 int unsigned_p;
31918 char altivec_type
31919 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
31920 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
31921 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
31922 : '?');
31924 while (POINTER_TYPE_P (type)
31925 || TREE_CODE (type) == FUNCTION_TYPE
31926 || TREE_CODE (type) == METHOD_TYPE
31927 || TREE_CODE (type) == ARRAY_TYPE)
31928 type = TREE_TYPE (type);
31930 mode = TYPE_MODE (type);
31932 /* Check for invalid AltiVec type qualifiers. */
31933 if (type == long_double_type_node)
31934 error ("use of %<long double%> in AltiVec types is invalid");
31935 else if (type == boolean_type_node)
31936 error ("use of boolean types in AltiVec types is invalid");
31937 else if (TREE_CODE (type) == COMPLEX_TYPE)
31938 error ("use of %<complex%> in AltiVec types is invalid");
31939 else if (DECIMAL_FLOAT_MODE_P (mode))
31940 error ("use of decimal floating point types in AltiVec types is invalid");
31941 else if (!TARGET_VSX)
31943 if (type == long_unsigned_type_node || type == long_integer_type_node)
31945 if (TARGET_64BIT)
31946 error ("use of %<long%> in AltiVec types is invalid for "
31947 "64-bit code without -mvsx");
31948 else if (rs6000_warn_altivec_long)
31949 warning (0, "use of %<long%> in AltiVec types is deprecated; "
31950 "use %<int%>");
31952 else if (type == long_long_unsigned_type_node
31953 || type == long_long_integer_type_node)
31954 error ("use of %<long long%> in AltiVec types is invalid without "
31955 "-mvsx");
31956 else if (type == double_type_node)
31957 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
31960 switch (altivec_type)
31962 case 'v':
31963 unsigned_p = TYPE_UNSIGNED (type);
31964 switch (mode)
31966 case TImode:
31967 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
31968 break;
31969 case DImode:
31970 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
31971 break;
31972 case SImode:
31973 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
31974 break;
31975 case HImode:
31976 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
31977 break;
31978 case QImode:
31979 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
31980 break;
31981 case SFmode: result = V4SF_type_node; break;
31982 case DFmode: result = V2DF_type_node; break;
31983 /* If the user says 'vector int bool', we may be handed the 'bool'
31984 attribute _before_ the 'vector' attribute, and so select the
31985 proper type in the 'b' case below. */
31986 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
31987 case V2DImode: case V2DFmode:
31988 result = type;
31989 default: break;
31991 break;
31992 case 'b':
31993 switch (mode)
31995 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
31996 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
31997 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
31998 case QImode: case V16QImode: result = bool_V16QI_type_node;
31999 default: break;
32001 break;
32002 case 'p':
32003 switch (mode)
32005 case V8HImode: result = pixel_V8HI_type_node;
32006 default: break;
32008 default: break;
32011 /* Propagate qualifiers attached to the element type
32012 onto the vector type. */
32013 if (result && result != type && TYPE_QUALS (type))
32014 result = build_qualified_type (result, TYPE_QUALS (type));
32016 *no_add_attrs = true; /* No need to hang on to the attribute. */
32018 if (result)
32019 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32021 return NULL_TREE;
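/* A rough illustration (guarded out) of the attribute spellings this
   handler receives; the AltiVec 'vector', 'pixel' and 'bool' keywords
   are expanded by the front end into attribute forms along these
   lines, with 'pixel' and 'bool' stacked on top of 'vector':  */
#if 0
typedef __attribute__ ((altivec (vector__))) int v_int;  /* vector int */
typedef __attribute__ ((altivec (vector__)))
        __attribute__ ((altivec (pixel__))) unsigned short v_pixel;
typedef __attribute__ ((altivec (vector__)))
        __attribute__ ((altivec (bool__))) unsigned int v_bool_int;
#endif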
32024 /* AltiVec defines four built-in scalar types that serve as vector
32025 elements; we must teach the compiler how to mangle them. */
32027 static const char *
32028 rs6000_mangle_type (const_tree type)
32030 type = TYPE_MAIN_VARIANT (type);
32032 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32033 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32034 return NULL;
32036 if (type == bool_char_type_node) return "U6__boolc";
32037 if (type == bool_short_type_node) return "U6__bools";
32038 if (type == pixel_type_node) return "u7__pixel";
32039 if (type == bool_int_type_node) return "U6__booli";
32040 if (type == bool_long_type_node) return "U6__booll";
32042 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32043 "g" for IBM extended double, no matter whether it is long double (using
32044 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32045 if (TARGET_FLOAT128)
32047 if (type == ieee128_float_type_node)
32048 return "U10__float128";
32050 if (type == ibm128_float_type_node)
32051 return "g";
32053 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32054 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32057 /* Mangle IBM extended float long double as `g' (__float128) on
32058 powerpc*-linux where long-double-64 previously was the default. */
32059 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32060 && TARGET_ELF
32061 && TARGET_LONG_DOUBLE_128
32062 && !TARGET_IEEEQUAD)
32063 return "g";
32065 /* For all other types, use normal C++ mangling. */
32066 return NULL;
32069 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32070 struct attribute_spec.handler. */
32072 static tree
32073 rs6000_handle_longcall_attribute (tree *node, tree name,
32074 tree args ATTRIBUTE_UNUSED,
32075 int flags ATTRIBUTE_UNUSED,
32076 bool *no_add_attrs)
32078 if (TREE_CODE (*node) != FUNCTION_TYPE
32079 && TREE_CODE (*node) != FIELD_DECL
32080 && TREE_CODE (*node) != TYPE_DECL)
32082 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32083 name);
32084 *no_add_attrs = true;
32087 return NULL_TREE;
32090 /* Set longcall attributes on all functions declared when
32091 rs6000_default_long_calls is true. */
32092 static void
32093 rs6000_set_default_type_attributes (tree type)
32095 if (rs6000_default_long_calls
32096 && (TREE_CODE (type) == FUNCTION_TYPE
32097 || TREE_CODE (type) == METHOD_TYPE))
32098 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32099 NULL_TREE,
32100 TYPE_ATTRIBUTES (type));
32102 #if TARGET_MACHO
32103 darwin_set_default_type_attributes (type);
32104 #endif
32107 /* Return a reference suitable for calling a function with the
32108 longcall attribute. */
32110 rtx
32111 rs6000_longcall_ref (rtx call_ref)
32113 const char *call_name;
32114 tree node;
32116 if (GET_CODE (call_ref) != SYMBOL_REF)
32117 return call_ref;
32119 /* System V adds '.' to the internal name, so skip the leading dots. */
32120 call_name = XSTR (call_ref, 0);
32121 if (*call_name == '.')
32123 while (*call_name == '.')
32124 call_name++;
32126 node = get_identifier (call_name);
32127 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32130 return force_reg (Pmode, call_ref);
32133 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32134 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32135 #endif
32137 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32138 struct attribute_spec.handler. */
32139 static tree
32140 rs6000_handle_struct_attribute (tree *node, tree name,
32141 tree args ATTRIBUTE_UNUSED,
32142 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32144 tree *type = NULL;
32145 if (DECL_P (*node))
32147 if (TREE_CODE (*node) == TYPE_DECL)
32148 type = &TREE_TYPE (*node);
32150 else
32151 type = node;
32153 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32154 || TREE_CODE (*type) == UNION_TYPE)))
32156 warning (OPT_Wattributes, "%qE attribute ignored", name);
32157 *no_add_attrs = true;
32160 else if ((is_attribute_p ("ms_struct", name)
32161 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32162 || ((is_attribute_p ("gcc_struct", name)
32163 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32165 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32166 name);
32167 *no_add_attrs = true;
32170 return NULL_TREE;
32173 static bool
32174 rs6000_ms_bitfield_layout_p (const_tree record_type)
32176 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32177 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32178 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32181 #ifdef USING_ELFOS_H
32183 /* A get_unnamed_section callback, used for switching to toc_section. */
32185 static void
32186 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32188 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32189 && TARGET_MINIMAL_TOC)
32191 if (!toc_initialized)
32193 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32194 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32195 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32196 fprintf (asm_out_file, "\t.tc ");
32197 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32198 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32199 fprintf (asm_out_file, "\n");
32201 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32202 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32203 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32204 fprintf (asm_out_file, " = .+32768\n");
32205 toc_initialized = 1;
32207 else
32208 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32210 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32212 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32213 if (!toc_initialized)
32215 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32216 toc_initialized = 1;
32219 else
32221 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32222 if (!toc_initialized)
32224 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32225 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32226 fprintf (asm_out_file, " = .+32768\n");
32227 toc_initialized = 1;
32232 /* Implement TARGET_ASM_INIT_SECTIONS. */
32234 static void
32235 rs6000_elf_asm_init_sections (void)
32237 toc_section
32238 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32240 sdata2_section
32241 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32242 SDATA2_SECTION_ASM_OP);
32245 /* Implement TARGET_SELECT_RTX_SECTION. */
32247 static section *
32248 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32249 unsigned HOST_WIDE_INT align)
32251 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32252 return toc_section;
32253 else
32254 return default_elf_select_rtx_section (mode, x, align);
32257 /* For a SYMBOL_REF, set generic flags and then perform some
32258 target-specific processing.
32260 When the AIX ABI is requested on a non-AIX system, replace the
32261 function name with the real name (with a leading .) rather than the
32262 function descriptor name. This saves a lot of overriding code to
32263 read the prefixes. */
32265 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32266 static void
32267 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32269 default_encode_section_info (decl, rtl, first);
32271 if (first
32272 && TREE_CODE (decl) == FUNCTION_DECL
32273 && !TARGET_AIX
32274 && DEFAULT_ABI == ABI_AIX)
32276 rtx sym_ref = XEXP (rtl, 0);
32277 size_t len = strlen (XSTR (sym_ref, 0));
32278 char *str = XALLOCAVEC (char, len + 2);
32279 str[0] = '.';
32280 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32281 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32285 static inline bool
32286 compare_section_name (const char *section, const char *templ)
32288 int len;
32290 len = strlen (templ);
32291 return (strncmp (section, templ, len) == 0
32292 && (section[len] == 0 || section[len] == '.'));
32295 bool
32296 rs6000_elf_in_small_data_p (const_tree decl)
32298 if (rs6000_sdata == SDATA_NONE)
32299 return false;
32301 /* We want to merge strings, so we never consider them small data. */
32302 if (TREE_CODE (decl) == STRING_CST)
32303 return false;
32305 /* Functions are never in the small data area. */
32306 if (TREE_CODE (decl) == FUNCTION_DECL)
32307 return false;
32309 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32311 const char *section = DECL_SECTION_NAME (decl);
32312 if (compare_section_name (section, ".sdata")
32313 || compare_section_name (section, ".sdata2")
32314 || compare_section_name (section, ".gnu.linkonce.s")
32315 || compare_section_name (section, ".sbss")
32316 || compare_section_name (section, ".sbss2")
32317 || compare_section_name (section, ".gnu.linkonce.sb")
32318 || strcmp (section, ".PPC.EMB.sdata0") == 0
32319 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32320 return true;
32322 else
32324 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32326 if (size > 0
32327 && size <= g_switch_value
32328 /* If it's not public, and we're not going to reference it there,
32329 there's no need to put it in the small data section. */
32330 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32331 return true;
32334 return false;
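/* Condensed form of the size test above (sketch, not part of GCC),
   assuming the decl has no explicit section name:  */
#if 0
static bool
small_data_size_ok_p (HOST_WIDE_INT size, HOST_WIDE_INT g_switch_value,
                      bool is_public, bool sdata_data_mode)
{
  return (size > 0
          && size <= g_switch_value
          && (!sdata_data_mode || is_public));
}
#endif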
32337 #endif /* USING_ELFOS_H */
32339 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32341 static bool
32342 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32344 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32347 /* Do not place thread-local symbols refs in the object blocks. */
32349 static bool
32350 rs6000_use_blocks_for_decl_p (const_tree decl)
32352 return !DECL_THREAD_LOCAL_P (decl);
32355 /* Return a REG that occurs in ADDR with coefficient 1.
32356 ADDR can be effectively incremented by incrementing REG.
32358 r0 is special and we must not select it as an address
32359 register by this routine since our caller will try to
32360 increment the returned register via an "la" instruction. */
32362 rtx
32363 find_addr_reg (rtx addr)
32365 while (GET_CODE (addr) == PLUS)
32367 if (GET_CODE (XEXP (addr, 0)) == REG
32368 && REGNO (XEXP (addr, 0)) != 0)
32369 addr = XEXP (addr, 0);
32370 else if (GET_CODE (XEXP (addr, 1)) == REG
32371 && REGNO (XEXP (addr, 1)) != 0)
32372 addr = XEXP (addr, 1);
32373 else if (CONSTANT_P (XEXP (addr, 0)))
32374 addr = XEXP (addr, 1);
32375 else if (CONSTANT_P (XEXP (addr, 1)))
32376 addr = XEXP (addr, 0);
32377 else
32378 gcc_unreachable ();
32380 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32381 return addr;
32384 void
32385 rs6000_fatal_bad_address (rtx op)
32387 fatal_insn ("bad address", op);
32390 #if TARGET_MACHO
32392 typedef struct branch_island_d {
32393 tree function_name;
32394 tree label_name;
32395 int line_number;
32396 } branch_island;
32399 static vec<branch_island, va_gc> *branch_islands;
32401 /* Remember to generate a branch island for far calls to the given
32402 function. */
32404 static void
32405 add_compiler_branch_island (tree label_name, tree function_name,
32406 int line_number)
32408 branch_island bi = {function_name, label_name, line_number};
32409 vec_safe_push (branch_islands, bi);
32412 /* Generate far-jump branch islands for everything recorded in
32413 branch_islands. Invoked immediately after the last instruction of
32414 the epilogue has been emitted; the branch islands must be appended
32415 to, and contiguous with, the function body. Mach-O stubs are
32416 generated in machopic_output_stub(). */
32418 static void
32419 macho_branch_islands (void)
32421 char tmp_buf[512];
32423 while (!vec_safe_is_empty (branch_islands))
32425 branch_island *bi = &branch_islands->last ();
32426 const char *label = IDENTIFIER_POINTER (bi->label_name);
32427 const char *name = IDENTIFIER_POINTER (bi->function_name);
32428 char name_buf[512];
32429 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32430 if (name[0] == '*' || name[0] == '&')
32431 strcpy (name_buf, name+1);
32432 else
32434 name_buf[0] = '_';
32435 strcpy (name_buf+1, name);
32437 strcpy (tmp_buf, "\n");
32438 strcat (tmp_buf, label);
32439 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32440 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32441 dbxout_stabd (N_SLINE, bi->line_number);
32442 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32443 if (flag_pic)
32445 if (TARGET_LINK_STACK)
32447 char name[32];
32448 get_ppc476_thunk_name (name);
32449 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32450 strcat (tmp_buf, name);
32451 strcat (tmp_buf, "\n");
32452 strcat (tmp_buf, label);
32453 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32455 else
32457 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32458 strcat (tmp_buf, label);
32459 strcat (tmp_buf, "_pic\n");
32460 strcat (tmp_buf, label);
32461 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32464 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32465 strcat (tmp_buf, name_buf);
32466 strcat (tmp_buf, " - ");
32467 strcat (tmp_buf, label);
32468 strcat (tmp_buf, "_pic)\n");
32470 strcat (tmp_buf, "\tmtlr r0\n");
32472 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32473 strcat (tmp_buf, name_buf);
32474 strcat (tmp_buf, " - ");
32475 strcat (tmp_buf, label);
32476 strcat (tmp_buf, "_pic)\n");
32478 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32480 else
32482 strcat (tmp_buf, ":\nlis r12,hi16(");
32483 strcat (tmp_buf, name_buf);
32484 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32485 strcat (tmp_buf, name_buf);
32486 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32488 output_asm_insn (tmp_buf, 0);
32489 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32490 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32491 dbxout_stabd (N_SLINE, bi->line_number);
32492 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32493 branch_islands->pop ();
32497 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME is already in the
32498 branch island list. */
32500 static int
32501 no_previous_def (tree function_name)
32503 branch_island *bi;
32504 unsigned ix;
32506 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32507 if (function_name == bi->function_name)
32508 return 0;
32509 return 1;
32512 /* GET_PREV_LABEL gets the label name from the previous definition of
32513 the function. */
32515 static tree
32516 get_prev_label (tree function_name)
32518 branch_island *bi;
32519 unsigned ix;
32521 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32522 if (function_name == bi->function_name)
32523 return bi->label_name;
32524 return NULL_TREE;
32527 /* INSN is either a function call or a millicode call. It may have an
32528 unconditional jump in its delay slot.
32530 CALL_DEST is the routine we are calling. */
32532 char *
32533 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32534 int cookie_operand_number)
32536 static char buf[256];
32537 if (darwin_emit_branch_islands
32538 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32539 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32541 tree labelname;
32542 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32544 if (no_previous_def (funname))
32546 rtx label_rtx = gen_label_rtx ();
32547 char *label_buf, temp_buf[256];
32548 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32549 CODE_LABEL_NUMBER (label_rtx));
32550 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32551 labelname = get_identifier (label_buf);
32552 add_compiler_branch_island (labelname, funname, insn_line (insn));
32554 else
32555 labelname = get_prev_label (funname);
32557 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32558 instruction will reach 'foo', otherwise link as 'bl L42'".
32559 "L42" should be a 'branch island', that will do a far jump to
32560 'foo'. Branch islands are generated in
32561 macho_branch_islands(). */
32562 sprintf (buf, "jbsr %%z%d,%.246s",
32563 dest_operand_number, IDENTIFIER_POINTER (labelname));
32565 else
32566 sprintf (buf, "bl %%z%d", dest_operand_number);
32567 return buf;
32570 /* Generate PIC and indirect symbol stubs. */
32572 void
32573 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32575 unsigned int length;
32576 char *symbol_name, *lazy_ptr_name;
32577 char *local_label_0;
32578 static int label = 0;
32580 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32581 symb = (*targetm.strip_name_encoding) (symb);
32584 length = strlen (symb);
32585 symbol_name = XALLOCAVEC (char, length + 32);
32586 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32588 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32589 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32591 if (flag_pic == 2)
32592 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32593 else
32594 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32596 if (flag_pic == 2)
32598 fprintf (file, "\t.align 5\n");
32600 fprintf (file, "%s:\n", stub);
32601 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32603 label++;
32604 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32605 sprintf (local_label_0, "\"L%011d$spb\"", label);
32607 fprintf (file, "\tmflr r0\n");
32608 if (TARGET_LINK_STACK)
32610 char name[32];
32611 get_ppc476_thunk_name (name);
32612 fprintf (file, "\tbl %s\n", name);
32613 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32615 else
32617 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32618 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32620 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32621 lazy_ptr_name, local_label_0);
32622 fprintf (file, "\tmtlr r0\n");
32623 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32624 (TARGET_64BIT ? "ldu" : "lwzu"),
32625 lazy_ptr_name, local_label_0);
32626 fprintf (file, "\tmtctr r12\n");
32627 fprintf (file, "\tbctr\n");
32629 else
32631 fprintf (file, "\t.align 4\n");
32633 fprintf (file, "%s:\n", stub);
32634 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32636 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32637 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32638 (TARGET_64BIT ? "ldu" : "lwzu"),
32639 lazy_ptr_name);
32640 fprintf (file, "\tmtctr r12\n");
32641 fprintf (file, "\tbctr\n");
32644 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32645 fprintf (file, "%s:\n", lazy_ptr_name);
32646 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32647 fprintf (file, "%sdyld_stub_binding_helper\n",
32648 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
32651 /* Legitimize PIC addresses. If the address is already
32652 position-independent, we return ORIG. Newly generated
32653 position-independent addresses go into a reg. This is REG if nonzero;
32654 otherwise we allocate register(s) as necessary. */
32656 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
32658 rtx
32659 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32660 rtx reg)
32662 rtx base, offset;
32664 if (reg == NULL && ! reload_in_progress && ! reload_completed)
32665 reg = gen_reg_rtx (Pmode);
32667 if (GET_CODE (orig) == CONST)
32669 rtx reg_temp;
32671 if (GET_CODE (XEXP (orig, 0)) == PLUS
32672 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32673 return orig;
32675 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32677 /* Use a different reg for the intermediate value, as
32678 it will be marked UNCHANGING. */
32679 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32680 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32681 Pmode, reg_temp);
32682 offset =
32683 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32684 Pmode, reg);
32686 if (GET_CODE (offset) == CONST_INT)
32688 if (SMALL_INT (offset))
32689 return plus_constant (Pmode, base, INTVAL (offset));
32690 else if (! reload_in_progress && ! reload_completed)
32691 offset = force_reg (Pmode, offset);
32692 else
32694 rtx mem = force_const_mem (Pmode, orig);
32695 return machopic_legitimize_pic_address (mem, Pmode, reg);
32698 return gen_rtx_PLUS (Pmode, base, offset);
32701 /* Fall back on generic machopic code. */
32702 return machopic_legitimize_pic_address (orig, mode, reg);
32705 /* Output a .machine directive for the Darwin assembler, and call
32706 the generic start_file routine. */
32708 static void
32709 rs6000_darwin_file_start (void)
32711 static const struct
32713 const char *arg;
32714 const char *name;
32715 HOST_WIDE_INT if_set;
32716 } mapping[] = {
32717 { "ppc64", "ppc64", MASK_64BIT },
32718 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
32719 { "power4", "ppc970", 0 },
32720 { "G5", "ppc970", 0 },
32721 { "7450", "ppc7450", 0 },
32722 { "7400", "ppc7400", MASK_ALTIVEC },
32723 { "G4", "ppc7400", 0 },
32724 { "750", "ppc750", 0 },
32725 { "740", "ppc750", 0 },
32726 { "G3", "ppc750", 0 },
32727 { "604e", "ppc604e", 0 },
32728 { "604", "ppc604", 0 },
32729 { "603e", "ppc603", 0 },
32730 { "603", "ppc603", 0 },
32731 { "601", "ppc601", 0 },
32732 { NULL, "ppc", 0 } };
32733 const char *cpu_id = "";
32734 size_t i;
32736 rs6000_file_start ();
32737 darwin_file_start ();
32739 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
32741 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
32742 cpu_id = rs6000_default_cpu;
32744 if (global_options_set.x_rs6000_cpu_index)
32745 cpu_id = processor_target_table[rs6000_cpu_index].name;
32747 /* Look through the mapping array. Pick the first name that either
32748 matches the argument, has a bit set in IF_SET that is also set
32749 in the target flags, or has a NULL name. */
32751 i = 0;
32752 while (mapping[i].arg != NULL
32753 && strcmp (mapping[i].arg, cpu_id) != 0
32754 && (mapping[i].if_set & rs6000_isa_flags) == 0)
32755 i++;
32757 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
32760 #endif /* TARGET_MACHO */
32762 #if TARGET_ELF
32763 static int
32764 rs6000_elf_reloc_rw_mask (void)
32766 if (flag_pic)
32767 return 3;
32768 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32769 return 2;
32770 else
32771 return 0;
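/* (In the TARGET_ASM_RELOC_RW_MASK convention, bit 0 stands for local
   relocations and bit 1 for global ones: flag_pic forces everything
   with runtime relocations into writable sections, while the
   AIX/ELFv2 case only demotes data with global relocations.)  */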
32774 /* Record an element in the table of global constructors. SYMBOL is
32775 a SYMBOL_REF of the function to be called; PRIORITY is a number
32776 between 0 and MAX_INIT_PRIORITY.
32778 This differs from default_named_section_asm_out_constructor in
32779 that we have special handling for -mrelocatable. */
32781 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
32782 static void
32783 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
32785 const char *section = ".ctors";
32786 char buf[16];
32788 if (priority != DEFAULT_INIT_PRIORITY)
32790 sprintf (buf, ".ctors.%.5u",
32791 /* Invert the numbering so the linker puts us in the proper
32792 order; constructors are run from right to left, and the
32793 linker sorts in increasing order. */
32794 MAX_INIT_PRIORITY - priority);
32795 section = buf;
32798 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32799 assemble_align (POINTER_SIZE);
32801 if (DEFAULT_ABI == ABI_V4
32802 && (TARGET_RELOCATABLE || flag_pic > 1))
32804 fputs ("\t.long (", asm_out_file);
32805 output_addr_const (asm_out_file, symbol);
32806 fputs (")@fixup\n", asm_out_file);
32808 else
32809 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
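/* Example of the inversion above (sketch, not part of GCC): with
   MAX_INIT_PRIORITY == 65535, a constructor of priority 101 lands in
   section ".ctors.65434".  */
#if 0
static void
example_ctor_section (char buf[16])
{
  sprintf (buf, ".ctors.%.5u", 65535 - 101);  /* ".ctors.65434" */
}
#endif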
32812 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
32813 static void
32814 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
32816 const char *section = ".dtors";
32817 char buf[16];
32819 if (priority != DEFAULT_INIT_PRIORITY)
32821 sprintf (buf, ".dtors.%.5u",
32822 /* Invert the numbering so the linker puts us in the proper
32823 order; constructors are run from right to left, and the
32824 linker sorts in increasing order. */
32825 MAX_INIT_PRIORITY - priority);
32826 section = buf;
32829 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32830 assemble_align (POINTER_SIZE);
32832 if (DEFAULT_ABI == ABI_V4
32833 && (TARGET_RELOCATABLE || flag_pic > 1))
32835 fputs ("\t.long (", asm_out_file);
32836 output_addr_const (asm_out_file, symbol);
32837 fputs (")@fixup\n", asm_out_file);
32839 else
32840 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
32843 void
32844 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
32846 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
32848 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
32849 ASM_OUTPUT_LABEL (file, name);
32850 fputs (DOUBLE_INT_ASM_OP, file);
32851 rs6000_output_function_entry (file, name);
32852 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
32853 if (DOT_SYMBOLS)
32855 fputs ("\t.size\t", file);
32856 assemble_name (file, name);
32857 fputs (",24\n\t.type\t.", file);
32858 assemble_name (file, name);
32859 fputs (",@function\n", file);
32860 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
32862 fputs ("\t.globl\t.", file);
32863 assemble_name (file, name);
32864 putc ('\n', file);
32867 else
32868 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32869 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
32870 rs6000_output_function_entry (file, name);
32871 fputs (":\n", file);
32872 return;
32875 if (DEFAULT_ABI == ABI_V4
32876 && (TARGET_RELOCATABLE || flag_pic > 1)
32877 && !TARGET_SECURE_PLT
32878 && (get_pool_size () != 0 || crtl->profile)
32879 && uses_TOC ())
32881 char buf[256];
32883 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
32885 fprintf (file, "\t.long ");
32886 assemble_name (file, toc_label_name);
32887 need_toc_init = 1;
32888 putc ('-', file);
32889 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
32890 assemble_name (file, buf);
32891 putc ('\n', file);
32894 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32895 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
32897 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
32899 char buf[256];
32901 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
32903 fprintf (file, "\t.quad .TOC.-");
32904 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
32905 assemble_name (file, buf);
32906 putc ('\n', file);
32909 if (DEFAULT_ABI == ABI_AIX)
32911 const char *desc_name, *orig_name;
32913 orig_name = (*targetm.strip_name_encoding) (name);
32914 desc_name = orig_name;
32915 while (*desc_name == '.')
32916 desc_name++;
32918 if (TREE_PUBLIC (decl))
32919 fprintf (file, "\t.globl %s\n", desc_name);
32921 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32922 fprintf (file, "%s:\n", desc_name);
32923 fprintf (file, "\t.long %s\n", orig_name);
32924 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
32925 fputs ("\t.long 0\n", file);
32926 fprintf (file, "\t.previous\n");
32928 ASM_OUTPUT_LABEL (file, name);
32931 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
32932 static void
32933 rs6000_elf_file_end (void)
32935 #ifdef HAVE_AS_GNU_ATTRIBUTE
32936 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
32938 if (rs6000_passes_float)
32939 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
32940 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
32941 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
32942 : 2));
32943 if (rs6000_passes_vector)
32944 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
32945 (TARGET_ALTIVEC_ABI ? 2
32946 : TARGET_SPE_ABI ? 3
32947 : 1));
32948 if (rs6000_returns_struct)
32949 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
32950 aix_struct_return ? 2 : 1);
32952 #endif
32953 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
32954 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
32955 file_end_indicate_exec_stack ();
32956 #endif
32958 if (flag_split_stack)
32959 file_end_indicate_split_stack ();
32961 if (cpu_builtin_p)
32963 /* We have expanded a CPU builtin, so we need to emit a reference to
32964 the special symbol that LIBC uses to declare that it supports the
32965 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
32966 switch_to_section (data_section);
32967 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
32968 fprintf (asm_out_file, "\t%s %s\n",
32969 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
32972 #endif
32974 #if TARGET_XCOFF
32976 #ifndef HAVE_XCOFF_DWARF_EXTRAS
32977 #define HAVE_XCOFF_DWARF_EXTRAS 0
32978 #endif
32980 static enum unwind_info_type
32981 rs6000_xcoff_debug_unwind_info (void)
32983 return UI_NONE;
32986 static void
32987 rs6000_xcoff_asm_output_anchor (rtx symbol)
32989 char buffer[100];
32991 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
32992 SYMBOL_REF_BLOCK_OFFSET (symbol));
32993 fprintf (asm_out_file, "%s", SET_ASM_OP);
32994 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
32995 fprintf (asm_out_file, ",");
32996 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
32997 fprintf (asm_out_file, "\n");
33000 static void
33001 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33003 fputs (GLOBAL_ASM_OP, stream);
33004 RS6000_OUTPUT_BASENAME (stream, name);
33005 putc ('\n', stream);
33008 /* A get_unnamed_decl callback, used for read-only sections. DIRECTIVE
33009 points to the section string variable. */
33011 static void
33012 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33014 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33015 *(const char *const *) directive,
33016 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33019 /* Likewise for read-write sections. */
33021 static void
33022 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33024 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33025 *(const char *const *) directive,
33026 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33029 static void
33030 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33032 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33033 *(const char *const *) directive,
33034 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33037 /* A get_unnamed_section callback, used for switching to toc_section. */
33039 static void
33040 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33042 if (TARGET_MINIMAL_TOC)
33044 /* toc_section is always selected at least once from
33045 rs6000_xcoff_file_start, so this is guaranteed to
33046 be defined exactly once in each file. */
33047 if (!toc_initialized)
33049 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33050 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33051 toc_initialized = 1;
33053 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33054 (TARGET_32BIT ? "" : ",3"));
33056 else
33057 fputs ("\t.toc\n", asm_out_file);
33060 /* Implement TARGET_ASM_INIT_SECTIONS. */
33062 static void
33063 rs6000_xcoff_asm_init_sections (void)
33065 read_only_data_section
33066 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33067 &xcoff_read_only_section_name);
33069 private_data_section
33070 = get_unnamed_section (SECTION_WRITE,
33071 rs6000_xcoff_output_readwrite_section_asm_op,
33072 &xcoff_private_data_section_name);
33074 tls_data_section
33075 = get_unnamed_section (SECTION_TLS,
33076 rs6000_xcoff_output_tls_section_asm_op,
33077 &xcoff_tls_data_section_name);
33079 tls_private_data_section
33080 = get_unnamed_section (SECTION_TLS,
33081 rs6000_xcoff_output_tls_section_asm_op,
33082 &xcoff_private_data_section_name);
33084 read_only_private_data_section
33085 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33086 &xcoff_private_data_section_name);
33088 toc_section
33089 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33091 readonly_data_section = read_only_data_section;
33094 static int
33095 rs6000_xcoff_reloc_rw_mask (void)
33097 return 3;
33100 static void
33101 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33102 tree decl ATTRIBUTE_UNUSED)
33104 int smclass;
33105 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33107 if (flags & SECTION_EXCLUDE)
33108 smclass = 4;
33109 else if (flags & SECTION_DEBUG)
33111 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33112 return;
33114 else if (flags & SECTION_CODE)
33115 smclass = 0;
33116 else if (flags & SECTION_TLS)
33117 smclass = 3;
33118 else if (flags & SECTION_WRITE)
33119 smclass = 2;
33120 else
33121 smclass = 1;
33123 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33124 (flags & SECTION_CODE) ? "." : "",
33125 name, suffix[smclass], flags & SECTION_ENTSIZE);
33128 #define IN_NAMED_SECTION(DECL) \
33129 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33130 && DECL_SECTION_NAME (DECL) != NULL)
33132 static section *
33133 rs6000_xcoff_select_section (tree decl, int reloc,
33134 unsigned HOST_WIDE_INT align)
33136 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33137 a named section. */
33138 if (align > BIGGEST_ALIGNMENT)
33140 resolve_unique_section (decl, reloc, true);
33141 if (IN_NAMED_SECTION (decl))
33142 return get_named_section (decl, NULL, reloc);
33145 if (decl_readonly_section (decl, reloc))
33147 if (TREE_PUBLIC (decl))
33148 return read_only_data_section;
33149 else
33150 return read_only_private_data_section;
33152 else
33154 #if HAVE_AS_TLS
33155 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33157 if (TREE_PUBLIC (decl))
33158 return tls_data_section;
33159 else if (bss_initializer_p (decl))
33161 /* Convert to COMMON to emit in BSS. */
33162 DECL_COMMON (decl) = 1;
33163 return tls_comm_section;
33165 else
33166 return tls_private_data_section;
33168 else
33169 #endif
33170 if (TREE_PUBLIC (decl))
33171 return data_section;
33172 else
33173 return private_data_section;
33177 static void
33178 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33180 const char *name;
33182 /* Use select_section for private data and uninitialized data with
33183 alignment <= BIGGEST_ALIGNMENT. */
33184 if (!TREE_PUBLIC (decl)
33185 || DECL_COMMON (decl)
33186 || (DECL_INITIAL (decl) == NULL_TREE
33187 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33188 || DECL_INITIAL (decl) == error_mark_node
33189 || (flag_zero_initialized_in_bss
33190 && initializer_zerop (DECL_INITIAL (decl))))
33191 return;
33193 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33194 name = (*targetm.strip_name_encoding) (name);
33195 set_decl_section_name (decl, name);
33198 /* Select section for constant in constant pool.
33200 On RS/6000, all constants are in the private read-only data area.
33201 However, if this is being placed in the TOC it must be output as a
33202 toc entry. */
33204 static section *
33205 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33206 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33208 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33209 return toc_section;
33210 else
33211 return read_only_private_data_section;
33214 /* Remove any trailing [DS] or the like from the symbol name. */
33216 static const char *
33217 rs6000_xcoff_strip_name_encoding (const char *name)
33219 size_t len;
33220 if (*name == '*')
33221 name++;
33222 len = strlen (name);
33223 if (name[len - 1] == ']')
33224 return ggc_alloc_string (name, len - 4);
33225 else
33226 return name;
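/* A minimal sketch of the mapping performed above, assuming the encoded
   suffix is always a two-character storage-mapping class in brackets
   (which is what the "len - 4" relies on):

     "foo[DS]"  -> "foo"
     "*bar[RW]" -> "bar"
     "baz"      -> "baz"  */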
33229 /* Section attributes. AIX is always PIC. */
33231 static unsigned int
33232 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33234 unsigned int align;
33235 unsigned int flags = default_section_type_flags (decl, name, reloc);
33237 /* Align to at least UNIT size. */
33238 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33239 align = MIN_UNITS_PER_WORD;
33240 else
33241 /* Increase alignment of large objects if not already stricter. */
33242 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33243 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33244 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33246 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
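/* For example, an 8-byte-aligned decl yields exact_log2 (8) == 3, so the
   log2 of the alignment travels in the SECTION_ENTSIZE bits of the
   returned flags; rs6000_xcoff_asm_named_section later prints exactly
   those bits as the csect alignment operand.  */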
33249 /* Output at beginning of assembler file.
33251 Initialize the section names for the RS/6000 at this point.
33253 Specify filename, including full path, to assembler.
33255 We want to go into the TOC section so at least one .toc will be emitted.
33256 Also, in order to output proper .bs/.es pairs, we need at least one static
33257 [RW] section emitted.
33259 Finally, declare mcount when profiling to make the assembler happy. */
33261 static void
33262 rs6000_xcoff_file_start (void)
33264 rs6000_gen_section_name (&xcoff_bss_section_name,
33265 main_input_filename, ".bss_");
33266 rs6000_gen_section_name (&xcoff_private_data_section_name,
33267 main_input_filename, ".rw_");
33268 rs6000_gen_section_name (&xcoff_read_only_section_name,
33269 main_input_filename, ".ro_");
33270 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33271 main_input_filename, ".tls_");
33272 rs6000_gen_section_name (&xcoff_tbss_section_name,
33273 main_input_filename, ".tbss_[UL]");
33275 fputs ("\t.file\t", asm_out_file);
33276 output_quoted_string (asm_out_file, main_input_filename);
33277 fputc ('\n', asm_out_file);
33278 if (write_symbols != NO_DEBUG)
33279 switch_to_section (private_data_section);
33280 switch_to_section (toc_section);
33281 switch_to_section (text_section);
33282 if (profile_flag)
33283 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33284 rs6000_file_start ();
33287 /* Output at end of assembler file.
33288 On the RS/6000, referencing data should automatically pull in text. */
33290 static void
33291 rs6000_xcoff_file_end (void)
33293 switch_to_section (text_section);
33294 fputs ("_section_.text:\n", asm_out_file);
33295 switch_to_section (data_section);
33296 fputs (TARGET_32BIT
33297 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33298 asm_out_file);
33301 struct declare_alias_data
33303 FILE *file;
33304 bool function_descriptor;
33307 /* Declare alias N. A helper callback for call_for_symbol_and_aliases. */
33309 static bool
33310 rs6000_declare_alias (struct symtab_node *n, void *d)
33312 struct declare_alias_data *data = (struct declare_alias_data *)d;
33313 /* The main symbol is output specially, because the varasm machinery does
33314 part of the job for us: we do not need to declare .globl/.lglobl and such. */
33315 if (!n->alias || n->weakref)
33316 return false;
33318 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33319 return false;
33321 /* Prevent assemble_alias from trying to use the .set pseudo operation,
33322 which does not behave as the middle-end expects. */
33323 TREE_ASM_WRITTEN (n->decl) = true;
33325 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33326 char *buffer = (char *) alloca (strlen (name) + 2);
33327 char *p;
33328 int dollar_inside = 0;
33330 strcpy (buffer, name);
33331 p = strchr (buffer, '$');
33332 while (p) {
33333 *p = '_';
33334 dollar_inside++;
33335 p = strchr (p + 1, '$');
33337 if (TREE_PUBLIC (n->decl))
33339 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33341 if (dollar_inside) {
33342 if (data->function_descriptor)
33343 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33344 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33346 if (data->function_descriptor)
33348 fputs ("\t.globl .", data->file);
33349 RS6000_OUTPUT_BASENAME (data->file, buffer);
33350 putc ('\n', data->file);
33352 fputs ("\t.globl ", data->file);
33353 RS6000_OUTPUT_BASENAME (data->file, buffer);
33354 putc ('\n', data->file);
33356 #ifdef ASM_WEAKEN_DECL
33357 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33358 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33359 #endif
33361 else
33363 if (dollar_inside)
33365 if (data->function_descriptor)
33366 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33367 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33369 if (data->function_descriptor)
33371 fputs ("\t.lglobl .", data->file);
33372 RS6000_OUTPUT_BASENAME (data->file, buffer);
33373 putc ('\n', data->file);
33375 fputs ("\t.lglobl ", data->file);
33376 RS6000_OUTPUT_BASENAME (data->file, buffer);
33377 putc ('\n', data->file);
33379 if (data->function_descriptor)
33380 fputs (".", data->file);
33381 RS6000_OUTPUT_BASENAME (data->file, buffer);
33382 fputs (":\n", data->file);
33383 return false;
33386 /* This macro produces the initial definition of a function name.
33387 On the RS/6000, we need to place an extra '.' in the function name and
33388 output the function descriptor.
33389 Dollar signs are converted to underscores.
33391 The csect for the function will have already been created when
33392 text_section was selected. We do have to go back to that csect, however.
33394 The third and fourth parameters to the .function pseudo-op (16 and 044)
33395 are placeholders which no longer have any use.
33397 Because AIX assembler's .set command has unexpected semantics, we output
33398 all aliases as alternative labels in front of the definition. */
33400 void
33401 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33403 char *buffer = (char *) alloca (strlen (name) + 1);
33404 char *p;
33405 int dollar_inside = 0;
33406 struct declare_alias_data data = {file, false};
33408 strcpy (buffer, name);
33409 p = strchr (buffer, '$');
33410 while (p) {
33411 *p = '_';
33412 dollar_inside++;
33413 p = strchr (p + 1, '$');
33415 if (TREE_PUBLIC (decl))
33417 if (!RS6000_WEAK || !DECL_WEAK (decl))
33419 if (dollar_inside) {
33420 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33421 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33423 fputs ("\t.globl .", file);
33424 RS6000_OUTPUT_BASENAME (file, buffer);
33425 putc ('\n', file);
33428 else
33430 if (dollar_inside) {
33431 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33432 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33434 fputs ("\t.lglobl .", file);
33435 RS6000_OUTPUT_BASENAME (file, buffer);
33436 putc ('\n', file);
33438 fputs ("\t.csect ", file);
33439 RS6000_OUTPUT_BASENAME (file, buffer);
33440 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33441 RS6000_OUTPUT_BASENAME (file, buffer);
33442 fputs (":\n", file);
33443 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33444 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33445 RS6000_OUTPUT_BASENAME (file, buffer);
33446 fputs (", TOC[tc0], 0\n", file);
33447 in_section = NULL;
33448 switch_to_section (function_section (decl));
33449 putc ('.', file);
33450 RS6000_OUTPUT_BASENAME (file, buffer);
33451 fputs (":\n", file);
33452 data.function_descriptor = true;
33453 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33454 if (!DECL_IGNORED_P (decl))
33456 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33457 xcoffout_declare_function (file, decl, buffer);
33458 else if (write_symbols == DWARF2_DEBUG)
33460 name = (*targetm.strip_name_encoding) (name);
33461 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33464 return;
33467 /* This macro produces the initial definition of an object (variable) name.
33468 Because AIX assembler's .set command has unexpected semantics, we output
33469 all aliases as alternative labels in front of the definition. */
33471 void
33472 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33474 struct declare_alias_data data = {file, false};
33475 RS6000_OUTPUT_BASENAME (file, name);
33476 fputs (":\n", file);
33477 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias, &data, true);
33480 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33482 void
33483 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33485 fputs (integer_asm_op (size, FALSE), file);
33486 assemble_name (file, label);
33487 fputs ("-$", file);
33490 /* Output a symbol offset relative to the dbase for the current object.
33491 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33492 signed offsets.
33494 __gcc_unwind_dbase is embedded in all executables/libraries through
33495 libgcc/config/rs6000/crtdbase.S. */
33497 void
33498 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33500 fputs (integer_asm_op (size, FALSE), file);
33501 assemble_name (file, label);
33502 fputs("-__gcc_unwind_dbase", file);
33505 #ifdef HAVE_AS_TLS
33506 static void
33507 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33509 rtx symbol;
33510 int flags;
33512 default_encode_section_info (decl, rtl, first);
33514 /* Careful not to prod global register variables. */
33515 if (!MEM_P (rtl))
33516 return;
33517 symbol = XEXP (rtl, 0);
33518 if (GET_CODE (symbol) != SYMBOL_REF)
33519 return;
33521 flags = SYMBOL_REF_FLAGS (symbol);
33523 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33524 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33526 SYMBOL_REF_FLAGS (symbol) = flags;
33528 #endif /* HAVE_AS_TLS */
33529 #endif /* TARGET_XCOFF */
33531 /* Return true if INSN should not be copied. */
33533 static bool
33534 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33536 return recog_memoized (insn) >= 0
33537 && get_attr_cannot_copy (insn);
33540 /* Compute a (partial) cost for rtx X. Return true if the complete
33541 cost has been computed, and false if subexpressions should be
33542 scanned. In either case, *TOTAL contains the cost result. */
33544 static bool
33545 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33546 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33548 int code = GET_CODE (x);
33550 switch (code)
33552 /* On the RS/6000, if it is valid in the insn, it is free. */
33553 case CONST_INT:
33554 if (((outer_code == SET
33555 || outer_code == PLUS
33556 || outer_code == MINUS)
33557 && (satisfies_constraint_I (x)
33558 || satisfies_constraint_L (x)))
33559 || (outer_code == AND
33560 && (satisfies_constraint_K (x)
33561 || (mode == SImode
33562 ? satisfies_constraint_L (x)
33563 : satisfies_constraint_J (x))))
33564 || ((outer_code == IOR || outer_code == XOR)
33565 && (satisfies_constraint_K (x)
33566 || (mode == SImode
33567 ? satisfies_constraint_L (x)
33568 : satisfies_constraint_J (x))))
33569 || outer_code == ASHIFT
33570 || outer_code == ASHIFTRT
33571 || outer_code == LSHIFTRT
33572 || outer_code == ROTATE
33573 || outer_code == ROTATERT
33574 || outer_code == ZERO_EXTRACT
33575 || (outer_code == MULT
33576 && satisfies_constraint_I (x))
33577 || ((outer_code == DIV || outer_code == UDIV
33578 || outer_code == MOD || outer_code == UMOD)
33579 && exact_log2 (INTVAL (x)) >= 0)
33580 || (outer_code == COMPARE
33581 && (satisfies_constraint_I (x)
33582 || satisfies_constraint_K (x)))
33583 || ((outer_code == EQ || outer_code == NE)
33584 && (satisfies_constraint_I (x)
33585 || satisfies_constraint_K (x)
33586 || (mode == SImode
33587 ? satisfies_constraint_L (x)
33588 : satisfies_constraint_J (x))))
33589 || (outer_code == GTU
33590 && satisfies_constraint_I (x))
33591 || (outer_code == LTU
33592 && satisfies_constraint_P (x)))
33594 *total = 0;
33595 return true;
33597 else if ((outer_code == PLUS
33598 && reg_or_add_cint_operand (x, VOIDmode))
33599 || (outer_code == MINUS
33600 && reg_or_sub_cint_operand (x, VOIDmode))
33601 || ((outer_code == SET
33602 || outer_code == IOR
33603 || outer_code == XOR)
33604 && (INTVAL (x)
33605 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
33607 *total = COSTS_N_INSNS (1);
33608 return true;
33610 /* FALLTHRU */
33612 case CONST_DOUBLE:
33613 case CONST_WIDE_INT:
33614 case CONST:
33615 case HIGH:
33616 case SYMBOL_REF:
33617 case MEM:
33618 /* When optimizing for size, MEM should be slightly more expensive
33619 than generating the address, e.g., (plus (reg) (const)).
33620 L1 cache latency is about two instructions. */
33621 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33622 return true;
33624 case LABEL_REF:
33625 *total = 0;
33626 return true;
33628 case PLUS:
33629 case MINUS:
33630 if (FLOAT_MODE_P (mode))
33631 *total = rs6000_cost->fp;
33632 else
33633 *total = COSTS_N_INSNS (1);
33634 return false;
33636 case MULT:
33637 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33638 && satisfies_constraint_I (XEXP (x, 1)))
33640 if (INTVAL (XEXP (x, 1)) >= -256
33641 && INTVAL (XEXP (x, 1)) <= 255)
33642 *total = rs6000_cost->mulsi_const9;
33643 else
33644 *total = rs6000_cost->mulsi_const;
33646 else if (mode == SFmode)
33647 *total = rs6000_cost->fp;
33648 else if (FLOAT_MODE_P (mode))
33649 *total = rs6000_cost->dmul;
33650 else if (mode == DImode)
33651 *total = rs6000_cost->muldi;
33652 else
33653 *total = rs6000_cost->mulsi;
33654 return false;
33656 case FMA:
33657 if (mode == SFmode)
33658 *total = rs6000_cost->fp;
33659 else
33660 *total = rs6000_cost->dmul;
33661 break;
33663 case DIV:
33664 case MOD:
33665 if (FLOAT_MODE_P (mode))
33667 *total = mode == DFmode ? rs6000_cost->ddiv
33668 : rs6000_cost->sdiv;
33669 return false;
33671 /* FALLTHRU */
33673 case UDIV:
33674 case UMOD:
33675 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33676 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
33678 if (code == DIV || code == MOD)
33679 /* Shift, addze */
33680 *total = COSTS_N_INSNS (2);
33681 else
33682 /* Shift */
33683 *total = COSTS_N_INSNS (1);
33685 else
33687 if (GET_MODE (XEXP (x, 1)) == DImode)
33688 *total = rs6000_cost->divdi;
33689 else
33690 *total = rs6000_cost->divsi;
33692 /* Add in shift and subtract for MOD unless we have a mod instruction. */
33693 if (!TARGET_MODULO && (code == MOD || code == UMOD))
33694 *total += COSTS_N_INSNS (2);
33695 return false;
33697 case CTZ:
33698 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
33699 return false;
33701 case FFS:
33702 *total = COSTS_N_INSNS (4);
33703 return false;
33705 case POPCOUNT:
33706 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
33707 return false;
33709 case PARITY:
33710 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
33711 return false;
33713 case NOT:
33714 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
33715 *total = 0;
33716 else
33717 *total = COSTS_N_INSNS (1);
33718 return false;
33720 case AND:
33721 if (CONST_INT_P (XEXP (x, 1)))
33723 rtx left = XEXP (x, 0);
33724 rtx_code left_code = GET_CODE (left);
33726 /* rotate-and-mask: 1 insn. */
33727 if ((left_code == ROTATE
33728 || left_code == ASHIFT
33729 || left_code == LSHIFTRT)
33730 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
33732 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
33733 if (!CONST_INT_P (XEXP (left, 1)))
33734 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
33735 *total += COSTS_N_INSNS (1);
33736 return true;
33739 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
33740 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
33741 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
33742 || (val & 0xffff) == val
33743 || (val & 0xffff0000) == val
33744 || ((val & 0xffff) == 0 && mode == SImode))
33746 *total = rtx_cost (left, mode, AND, 0, speed);
33747 *total += COSTS_N_INSNS (1);
33748 return true;
33751 /* 2 insns. */
33752 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
33754 *total = rtx_cost (left, mode, AND, 0, speed);
33755 *total += COSTS_N_INSNS (2);
33756 return true;
33760 *total = COSTS_N_INSNS (1);
33761 return false;
33763 case IOR:
33764 /* FIXME */
33765 *total = COSTS_N_INSNS (1);
33766 return true;
33768 case CLZ:
33769 case XOR:
33770 case ZERO_EXTRACT:
33771 *total = COSTS_N_INSNS (1);
33772 return false;
33774 case ASHIFT:
33775 /* EXTSWSLI is a combined (sign-extend + shift) instruction, so don't count
33776 the sign extend and the shift separately within the insn. */
33777 if (TARGET_EXTSWSLI && mode == DImode
33778 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
33779 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
33781 *total = 0;
33782 return false;
33784 /* fall through */
33786 case ASHIFTRT:
33787 case LSHIFTRT:
33788 case ROTATE:
33789 case ROTATERT:
33790 /* Handle mul_highpart. */
33791 if (outer_code == TRUNCATE
33792 && GET_CODE (XEXP (x, 0)) == MULT)
33794 if (mode == DImode)
33795 *total = rs6000_cost->muldi;
33796 else
33797 *total = rs6000_cost->mulsi;
33798 return true;
33800 else if (outer_code == AND)
33801 *total = 0;
33802 else
33803 *total = COSTS_N_INSNS (1);
33804 return false;
33806 case SIGN_EXTEND:
33807 case ZERO_EXTEND:
33808 if (GET_CODE (XEXP (x, 0)) == MEM)
33809 *total = 0;
33810 else
33811 *total = COSTS_N_INSNS (1);
33812 return false;
33814 case COMPARE:
33815 case NEG:
33816 case ABS:
33817 if (!FLOAT_MODE_P (mode))
33819 *total = COSTS_N_INSNS (1);
33820 return false;
33822 /* FALLTHRU */
33824 case FLOAT:
33825 case UNSIGNED_FLOAT:
33826 case FIX:
33827 case UNSIGNED_FIX:
33828 case FLOAT_TRUNCATE:
33829 *total = rs6000_cost->fp;
33830 return false;
33832 case FLOAT_EXTEND:
33833 if (mode == DFmode)
33834 *total = rs6000_cost->sfdf_convert;
33835 else
33836 *total = rs6000_cost->fp;
33837 return false;
33839 case UNSPEC:
33840 switch (XINT (x, 1))
33842 case UNSPEC_FRSP:
33843 *total = rs6000_cost->fp;
33844 return true;
33846 default:
33847 break;
33849 break;
33851 case CALL:
33852 case IF_THEN_ELSE:
33853 if (!speed)
33855 *total = COSTS_N_INSNS (1);
33856 return true;
33858 else if (FLOAT_MODE_P (mode)
33859 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
33861 *total = rs6000_cost->fp;
33862 return false;
33864 break;
33866 case NE:
33867 case EQ:
33868 case GTU:
33869 case LTU:
33870 /* Carry bit requires mode == Pmode.
33871 NEG or PLUS already counted so only add one. */
33872 if (mode == Pmode
33873 && (outer_code == NEG || outer_code == PLUS))
33875 *total = COSTS_N_INSNS (1);
33876 return true;
33878 if (outer_code == SET)
33880 if (XEXP (x, 1) == const0_rtx)
33882 if (TARGET_ISEL && !TARGET_MFCRF)
33883 *total = COSTS_N_INSNS (8);
33884 else
33885 *total = COSTS_N_INSNS (2);
33886 return true;
33888 else
33890 *total = COSTS_N_INSNS (3);
33891 return false;
33894 /* FALLTHRU */
33896 case GT:
33897 case LT:
33898 case UNORDERED:
33899 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
33901 if (TARGET_ISEL && !TARGET_MFCRF)
33902 *total = COSTS_N_INSNS (8);
33903 else
33904 *total = COSTS_N_INSNS (2);
33905 return true;
33907 /* CC COMPARE. */
33908 if (outer_code == COMPARE)
33910 *total = 0;
33911 return true;
33913 break;
33915 default:
33916 break;
33919 return false;
33922 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
33924 static bool
33925 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
33926 int opno, int *total, bool speed)
33928 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
33930 fprintf (stderr,
33931 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
33932 "opno = %d, total = %d, speed = %s, x:\n",
33933 ret ? "complete" : "scan inner",
33934 GET_MODE_NAME (mode),
33935 GET_RTX_NAME (outer_code),
33936 opno,
33937 *total,
33938 speed ? "true" : "false");
33940 debug_rtx (x);
33942 return ret;
33945 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
33947 static int
33948 rs6000_debug_address_cost (rtx x, machine_mode mode,
33949 addr_space_t as, bool speed)
33951 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
33953 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
33954 ret, speed ? "true" : "false");
33955 debug_rtx (x);
33957 return ret;
33961 /* A C expression returning the cost of moving data from a register of class
33962 CLASS1 to one of CLASS2. */
33964 static int
33965 rs6000_register_move_cost (machine_mode mode,
33966 reg_class_t from, reg_class_t to)
33968 int ret;
33970 if (TARGET_DEBUG_COST)
33971 dbg_cost_ctrl++;
33973 /* Moves from/to GENERAL_REGS. */
33974 if (reg_classes_intersect_p (to, GENERAL_REGS)
33975 || reg_classes_intersect_p (from, GENERAL_REGS))
33977 reg_class_t rclass = from;
33979 if (! reg_classes_intersect_p (to, GENERAL_REGS))
33980 rclass = to;
33982 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
33983 ret = (rs6000_memory_move_cost (mode, rclass, false)
33984 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
33986 /* It's more expensive to move CR_REGS than CR0_REGS because of the
33987 shift. */
33988 else if (rclass == CR_REGS)
33989 ret = 4;
33991 /* For those processors that have slow LR/CTR moves, make them more
33992 expensive than memory in order to bias spills to memory. */
33993 else if ((rs6000_cpu == PROCESSOR_POWER6
33994 || rs6000_cpu == PROCESSOR_POWER7
33995 || rs6000_cpu == PROCESSOR_POWER8
33996 || rs6000_cpu == PROCESSOR_POWER9)
33997 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
33998 ret = 6 * hard_regno_nregs[0][mode];
34000 else
34001 /* A move will cost one instruction per GPR moved. */
34002 ret = 2 * hard_regno_nregs[0][mode];
34005 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34006 else if (VECTOR_MEM_VSX_P (mode)
34007 && reg_classes_intersect_p (to, VSX_REGS)
34008 && reg_classes_intersect_p (from, VSX_REGS))
34009 ret = 2 * hard_regno_nregs[32][mode];
34011 /* Moving between two similar registers is just one instruction. */
34012 else if (reg_classes_intersect_p (to, from))
34013 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34015 /* Everything else has to go through GENERAL_REGS. */
34016 else
34017 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34018 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34020 if (TARGET_DEBUG_COST)
34022 if (dbg_cost_ctrl == 1)
34023 fprintf (stderr,
34024 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34025 ret, GET_MODE_NAME (mode), reg_class_names[from],
34026 reg_class_names[to]);
34027 dbg_cost_ctrl--;
34030 return ret;
34033 /* A C expression returning the cost of moving data of MODE from a register to
34034 or from memory. */
34036 static int
34037 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34038 bool in ATTRIBUTE_UNUSED)
34040 int ret;
34042 if (TARGET_DEBUG_COST)
34043 dbg_cost_ctrl++;
34045 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34046 ret = 4 * hard_regno_nregs[0][mode];
34047 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34048 || reg_classes_intersect_p (rclass, VSX_REGS)))
34049 ret = 4 * hard_regno_nregs[32][mode];
34050 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34051 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34052 else
34053 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34055 if (TARGET_DEBUG_COST)
34057 if (dbg_cost_ctrl == 1)
34058 fprintf (stderr,
34059 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34060 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34061 dbg_cost_ctrl--;
34064 return ret;
34067 /* Returns a code for a target-specific builtin that implements
34068 the reciprocal of the function, or NULL_TREE if not available. */
34070 static tree
34071 rs6000_builtin_reciprocal (tree fndecl)
34073 switch (DECL_FUNCTION_CODE (fndecl))
34075 case VSX_BUILTIN_XVSQRTDP:
34076 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34077 return NULL_TREE;
34079 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34081 case VSX_BUILTIN_XVSQRTSP:
34082 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34083 return NULL_TREE;
34085 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34087 default:
34088 return NULL_TREE;
34092 /* Load up a constant. If the mode is a vector mode, splat the value across
34093 all of the vector elements. */
34095 static rtx
34096 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34098 rtx reg;
34100 if (mode == SFmode || mode == DFmode)
34102 rtx d = const_double_from_real_value (dconst, mode);
34103 reg = force_reg (mode, d);
34105 else if (mode == V4SFmode)
34107 rtx d = const_double_from_real_value (dconst, SFmode);
34108 rtvec v = gen_rtvec (4, d, d, d, d);
34109 reg = gen_reg_rtx (mode);
34110 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34112 else if (mode == V2DFmode)
34114 rtx d = const_double_from_real_value (dconst, DFmode);
34115 rtvec v = gen_rtvec (2, d, d);
34116 reg = gen_reg_rtx (mode);
34117 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34119 else
34120 gcc_unreachable ();
34122 return reg;
34125 /* Generate an FMA instruction. */
34127 static void
34128 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34130 machine_mode mode = GET_MODE (target);
34131 rtx dst;
34133 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34134 gcc_assert (dst != NULL);
34136 if (dst != target)
34137 emit_move_insn (target, dst);
34140 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
34142 static void
34143 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34145 machine_mode mode = GET_MODE (dst);
34146 rtx r;
34148 /* This is a tad more complicated, since the fnma_optab is for
34149 a different expression: fma(-m1, m2, a), which is the same
34150 thing except in the case of signed zeros.
34152 Fortunately we know that if FMA is supported that FNMSUB is
34153 also supported in the ISA. Just expand it directly. */
34155 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34157 r = gen_rtx_NEG (mode, a);
34158 r = gen_rtx_FMA (mode, m1, m2, r);
34159 r = gen_rtx_NEG (mode, r);
34160 emit_insn (gen_rtx_SET (dst, r));
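/* Expanding the expression above shows why this is FNMSUB, and why it
   matches fnma_optab except for signed zeros:

     -fma (m1, m2, -a) = -(m1*m2 - a)     (FNMSUB)
      fma (-m1, m2, a) = a - m1*m2        (fnma_optab)

   When m1*m2 == a exactly, the first form yields -0.0 and the second
   +0.0 under round-to-nearest, which is the difference noted above.  */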
34163 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34164 add a reg_note saying that this was a division. Support both scalar and
34165 vector divide. Assumes no trapping math and finite arguments. */
34167 void
34168 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34170 machine_mode mode = GET_MODE (dst);
34171 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34172 int i;
34174 /* Low precision estimates guarantee 5 bits of accuracy. High
34175 precision estimates guarantee 14 bits of accuracy. SFmode
34176 requires 23 bits of accuracy. DFmode requires 52 bits of
34177 accuracy. Each pass at least doubles the accuracy, leading
34178 to the following. */
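/* Concretely: the relative error is squared each pass, so a 5-bit
   estimate gives 5 -> 10 -> 20 -> 40 bits over the three passes
   (enough for SFmode's 23; the extra DFmode pass reaches 80 > 52),
   while a 14-bit estimate needs one pass for SFmode (28 bits) and
   two for DFmode (56 bits).  The last iteration below also folds in
   the numerator: with u = n*x and v = n - d*u,
   v*x + u = n*x*(2 - d*x).  */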
34179 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34180 if (mode == DFmode || mode == V2DFmode)
34181 passes++;
34183 enum insn_code code = optab_handler (smul_optab, mode);
34184 insn_gen_fn gen_mul = GEN_FCN (code);
34186 gcc_assert (code != CODE_FOR_nothing);
34188 one = rs6000_load_constant_and_splat (mode, dconst1);
34190 /* x0 = 1./d estimate */
34191 x0 = gen_reg_rtx (mode);
34192 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34193 UNSPEC_FRES)));
34195 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34196 if (passes > 1) {
34198 /* e0 = 1. - d * x0 */
34199 e0 = gen_reg_rtx (mode);
34200 rs6000_emit_nmsub (e0, d, x0, one);
34202 /* x1 = x0 + e0 * x0 */
34203 x1 = gen_reg_rtx (mode);
34204 rs6000_emit_madd (x1, e0, x0, x0);
34206 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34207 ++i, xprev = xnext, eprev = enext) {
34209 /* enext = eprev * eprev */
34210 enext = gen_reg_rtx (mode);
34211 emit_insn (gen_mul (enext, eprev, eprev));
34213 /* xnext = xprev + enext * xprev */
34214 xnext = gen_reg_rtx (mode);
34215 rs6000_emit_madd (xnext, enext, xprev, xprev);
34218 } else
34219 xprev = x0;
34221 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34223 /* u = n * xprev */
34224 u = gen_reg_rtx (mode);
34225 emit_insn (gen_mul (u, n, xprev));
34227 /* v = n - (d * u) */
34228 v = gen_reg_rtx (mode);
34229 rs6000_emit_nmsub (v, d, u, n);
34231 /* dst = (v * xprev) + u */
34232 rs6000_emit_madd (dst, v, xprev, u);
34234 if (note_p)
34235 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34238 /* Goldschmidt's Algorithm for single/double-precision floating point
34239 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34241 void
34242 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34244 machine_mode mode = GET_MODE (src);
34245 rtx e = gen_reg_rtx (mode);
34246 rtx g = gen_reg_rtx (mode);
34247 rtx h = gen_reg_rtx (mode);
34249 /* Low precision estimates guarantee 5 bits of accuracy. High
34250 precision estimates guarantee 14 bits of accuracy. SFmode
34251 requires 23 bits of accuracy. DFmode requires 52 bits of
34252 accuracy. Each pass at least doubles the accuracy, leading
34253 to the following. */
34254 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34255 if (mode == DFmode || mode == V2DFmode)
34256 passes++;
34258 int i;
34259 rtx mhalf;
34260 enum insn_code code = optab_handler (smul_optab, mode);
34261 insn_gen_fn gen_mul = GEN_FCN (code);
34263 gcc_assert (code != CODE_FOR_nothing);
34265 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34267 /* e = rsqrt estimate */
34268 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34269 UNSPEC_RSQRT)));
34271 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34272 if (!recip)
34274 rtx zero = force_reg (mode, CONST0_RTX (mode));
34276 if (mode == SFmode)
34278 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34279 e, zero, mode, 0);
34280 if (target != e)
34281 emit_move_insn (e, target);
34283 else
34285 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34286 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34290 /* g = sqrt estimate. */
34291 emit_insn (gen_mul (g, e, src));
34292 /* h = 1/(2*sqrt) estimate. */
34293 emit_insn (gen_mul (h, e, mhalf));
34295 if (recip)
34297 if (passes == 1)
34299 rtx t = gen_reg_rtx (mode);
34300 rs6000_emit_nmsub (t, g, h, mhalf);
34301 /* Apply the correction directly to the rsqrt estimate. */
34302 rs6000_emit_madd (dst, e, t, e);
34304 else
34306 for (i = 0; i < passes; i++)
34308 rtx t1 = gen_reg_rtx (mode);
34309 rtx g1 = gen_reg_rtx (mode);
34310 rtx h1 = gen_reg_rtx (mode);
34312 rs6000_emit_nmsub (t1, g, h, mhalf);
34313 rs6000_emit_madd (g1, g, t1, g);
34314 rs6000_emit_madd (h1, h, t1, h);
34316 g = g1;
34317 h = h1;
34319 /* Multiply h by 2 for the rsqrt result, since h estimates 1/(2*sqrt). */
34320 emit_insn (gen_add3_insn (dst, h, h));
34323 else
34325 rtx t = gen_reg_rtx (mode);
34326 rs6000_emit_nmsub (t, g, h, mhalf);
34327 rs6000_emit_madd (dst, g, t, g);
34330 return;
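/* A note on the iteration above: rs6000_emit_nmsub computes
   t = 1/2 - g*h, and the two madds then form g' = g + g*t and
   h' = h + h*t.  When g ~= sqrt(x) and h ~= 1/(2*sqrt(x)) we have
   g*h ~= 1/2 and t ~= 0, so both estimates converge quadratically;
   the result is g for sqrt, or 2*h for rsqrt.  */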
34333 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34334 (Power7) targets. DST is the target, and SRC is the argument operand. */
34336 void
34337 rs6000_emit_popcount (rtx dst, rtx src)
34339 machine_mode mode = GET_MODE (dst);
34340 rtx tmp1, tmp2;
34342 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34343 if (TARGET_POPCNTD)
34345 if (mode == SImode)
34346 emit_insn (gen_popcntdsi2 (dst, src));
34347 else
34348 emit_insn (gen_popcntddi2 (dst, src));
34349 return;
34352 tmp1 = gen_reg_rtx (mode);
34354 if (mode == SImode)
34356 emit_insn (gen_popcntbsi2 (tmp1, src));
34357 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34358 NULL_RTX, 0);
34359 tmp2 = force_reg (SImode, tmp2);
34360 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34362 else
34364 emit_insn (gen_popcntbdi2 (tmp1, src));
34365 tmp2 = expand_mult (DImode, tmp1,
34366 GEN_INT ((HOST_WIDE_INT)
34367 0x01010101 << 32 | 0x01010101),
34368 NULL_RTX, 0);
34369 tmp2 = force_reg (DImode, tmp2);
34370 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
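/* Host-side illustration (kept under #if 0, not part of the compiler) of
   how the multiply-and-shift above folds the per-byte counts produced by
   popcntb into a single sum; byte_counts stands for a word whose bytes
   already hold per-byte population counts, each at most 8.  */
#if 0
#include <stdint.h>

static uint32_t
fold_byte_counts (uint32_t byte_counts)
{
  /* Multiplying by 0x01010101 places b0 + b1 + b2 + b3 in the top byte;
     the total is at most 32, so no carry crosses a byte boundary.  */
  return (byte_counts * 0x01010101u) >> 24;
}
#endif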
34375 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34376 target, and SRC is the argument operand. */
34378 void
34379 rs6000_emit_parity (rtx dst, rtx src)
34381 machine_mode mode = GET_MODE (dst);
34382 rtx tmp;
34384 tmp = gen_reg_rtx (mode);
34386 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34387 if (TARGET_CMPB)
34389 if (mode == SImode)
34391 emit_insn (gen_popcntbsi2 (tmp, src));
34392 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34394 else
34396 emit_insn (gen_popcntbdi2 (tmp, src));
34397 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34399 return;
34402 if (mode == SImode)
34404 /* Is mult+shift >= shift+xor+shift+xor? */
34405 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34407 rtx tmp1, tmp2, tmp3, tmp4;
34409 tmp1 = gen_reg_rtx (SImode);
34410 emit_insn (gen_popcntbsi2 (tmp1, src));
34412 tmp2 = gen_reg_rtx (SImode);
34413 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34414 tmp3 = gen_reg_rtx (SImode);
34415 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34417 tmp4 = gen_reg_rtx (SImode);
34418 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34419 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34421 else
34422 rs6000_emit_popcount (tmp, src);
34423 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34425 else
34427 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34428 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34430 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34432 tmp1 = gen_reg_rtx (DImode);
34433 emit_insn (gen_popcntbdi2 (tmp1, src));
34435 tmp2 = gen_reg_rtx (DImode);
34436 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34437 tmp3 = gen_reg_rtx (DImode);
34438 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34440 tmp4 = gen_reg_rtx (DImode);
34441 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34442 tmp5 = gen_reg_rtx (DImode);
34443 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34445 tmp6 = gen_reg_rtx (DImode);
34446 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34447 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34449 else
34450 rs6000_emit_popcount (tmp, src);
34451 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
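/* The shift/xor ladders above rely on parity being XOR-associative:
   each fold XORs the upper half of the per-byte counts onto the lower
   half, so the low byte ends up holding the XOR of every byte count,
   whose bit 0 is the parity of the whole word; the final AND with
   const1_rtx extracts that bit.  */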
34455 /* Expand an Altivec constant permutation for little endian mode.
34456 There are two issues: First, the two input operands must be
34457 swapped so that together they form a double-wide array in LE
34458 order. Second, the vperm instruction has surprising behavior
34459 in LE mode: it interprets the elements of the source vectors
34460 in BE mode ("left to right") and interprets the elements of
34461 the destination vector in LE mode ("right to left"). To
34462 correct for this, we must subtract each element of the permute
34463 control vector from 31.
34465 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34466 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34467 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34468 serve as the permute control vector. Then, in BE mode,
34470 vperm 9,10,11,12
34472 places the desired result in vr9. However, in LE mode the
34473 vector contents will be
34475 vr10 = 00000003 00000002 00000001 00000000
34476 vr11 = 00000007 00000006 00000005 00000004
34478 The result of the vperm using the same permute control vector is
34480 vr9 = 05000000 07000000 01000000 03000000
34482 That is, the leftmost 4 bytes of vr10 are interpreted as the
34483 source for the rightmost 4 bytes of vr9, and so on.
34485 If we change the permute control vector to
34487 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34489 and issue
34491 vperm 9,11,10,12
34493 we get the desired
34495 vr9 = 00000006 00000004 00000002 00000000. */
34497 void
34498 altivec_expand_vec_perm_const_le (rtx operands[4])
34500 unsigned int i;
34501 rtx perm[16];
34502 rtx constv, unspec;
34503 rtx target = operands[0];
34504 rtx op0 = operands[1];
34505 rtx op1 = operands[2];
34506 rtx sel = operands[3];
34508 /* Unpack and adjust the constant selector. */
34509 for (i = 0; i < 16; ++i)
34511 rtx e = XVECEXP (sel, 0, i);
34512 unsigned int elt = 31 - (INTVAL (e) & 31);
34513 perm[i] = GEN_INT (elt);
34516 /* Expand to a permute, swapping the inputs and using the
34517 adjusted selector. */
34518 if (!REG_P (op0))
34519 op0 = force_reg (V16QImode, op0);
34520 if (!REG_P (op1))
34521 op1 = force_reg (V16QImode, op1);
34523 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34524 constv = force_reg (V16QImode, constv);
34525 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34526 UNSPEC_VPERM);
34527 if (!REG_P (target))
34529 rtx tmp = gen_reg_rtx (V16QImode);
34530 emit_move_insn (tmp, unspec);
34531 unspec = tmp;
34534 emit_move_insn (target, unspec);
34537 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
34538 permute control vector. But here it's not a constant, so we must
34539 generate a vector NAND or NOR to do the adjustment. */
34541 void
34542 altivec_expand_vec_perm_le (rtx operands[4])
34544 rtx notx, iorx, unspec;
34545 rtx target = operands[0];
34546 rtx op0 = operands[1];
34547 rtx op1 = operands[2];
34548 rtx sel = operands[3];
34549 rtx tmp = target;
34550 rtx norreg = gen_reg_rtx (V16QImode);
34551 machine_mode mode = GET_MODE (target);
34553 /* Get everything in regs so the pattern matches. */
34554 if (!REG_P (op0))
34555 op0 = force_reg (mode, op0);
34556 if (!REG_P (op1))
34557 op1 = force_reg (mode, op1);
34558 if (!REG_P (sel))
34559 sel = force_reg (V16QImode, sel);
34560 if (!REG_P (target))
34561 tmp = gen_reg_rtx (mode);
34563 if (TARGET_P9_VECTOR)
34565 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
34566 UNSPEC_VPERMR);
34568 else
34570 /* Invert the selector with a VNAND if available, else a VNOR.
34571 The VNAND is preferred for future fusion opportunities. */
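/* Only the low five bits of each selector byte matter, and in five bits
   31 - e is the same as ~e; so a bitwise NOT of SEL performs the
   "subtract from 31" adjustment, and VNAND or VNOR with both inputs
   equal to SEL computes exactly that NOT.  */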
34572 notx = gen_rtx_NOT (V16QImode, sel);
34573 iorx = (TARGET_P8_VECTOR
34574 ? gen_rtx_IOR (V16QImode, notx, notx)
34575 : gen_rtx_AND (V16QImode, notx, notx));
34576 emit_insn (gen_rtx_SET (norreg, iorx));
34578 /* Permute with operands reversed and adjusted selector. */
34579 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
34580 UNSPEC_VPERM);
34583 /* Copy into target, possibly by way of a register. */
34584 if (!REG_P (target))
34586 emit_move_insn (tmp, unspec);
34587 unspec = tmp;
34590 emit_move_insn (target, unspec);
34593 /* Expand an Altivec constant permutation. Return true if we match
34594 an efficient implementation; false to fall back to VPERM. */
34596 bool
34597 altivec_expand_vec_perm_const (rtx operands[4])
34599 struct altivec_perm_insn {
34600 HOST_WIDE_INT mask;
34601 enum insn_code impl;
34602 unsigned char perm[16];
34604 static const struct altivec_perm_insn patterns[] = {
34605 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
34606 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
34607 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
34608 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
34609 { OPTION_MASK_ALTIVEC,
34610 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
34611 : CODE_FOR_altivec_vmrglb_direct),
34612 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
34613 { OPTION_MASK_ALTIVEC,
34614 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
34615 : CODE_FOR_altivec_vmrglh_direct),
34616 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
34617 { OPTION_MASK_ALTIVEC,
34618 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
34619 : CODE_FOR_altivec_vmrglw_direct),
34620 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
34621 { OPTION_MASK_ALTIVEC,
34622 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
34623 : CODE_FOR_altivec_vmrghb_direct),
34624 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
34625 { OPTION_MASK_ALTIVEC,
34626 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
34627 : CODE_FOR_altivec_vmrghh_direct),
34628 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
34629 { OPTION_MASK_ALTIVEC,
34630 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
34631 : CODE_FOR_altivec_vmrghw_direct),
34632 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
34633 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
34634 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
34635 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
34636 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
34639 unsigned int i, j, elt, which;
34640 unsigned char perm[16];
34641 rtx target, op0, op1, sel, x;
34642 bool one_vec;
34644 target = operands[0];
34645 op0 = operands[1];
34646 op1 = operands[2];
34647 sel = operands[3];
34649 /* Unpack the constant selector. */
34650 for (i = which = 0; i < 16; ++i)
34652 rtx e = XVECEXP (sel, 0, i);
34653 elt = INTVAL (e) & 31;
34654 which |= (elt < 16 ? 1 : 2);
34655 perm[i] = elt;
34658 /* Simplify the constant selector based on operands. */
34659 switch (which)
34661 default:
34662 gcc_unreachable ();
34664 case 3:
34665 one_vec = false;
34666 if (!rtx_equal_p (op0, op1))
34667 break;
34668 /* FALLTHRU */
34670 case 2:
34671 for (i = 0; i < 16; ++i)
34672 perm[i] &= 15;
34673 op0 = op1;
34674 one_vec = true;
34675 break;
34677 case 1:
34678 op1 = op0;
34679 one_vec = true;
34680 break;
34683 /* Look for splat patterns. */
34684 if (one_vec)
34686 elt = perm[0];
34688 for (i = 0; i < 16; ++i)
34689 if (perm[i] != elt)
34690 break;
34691 if (i == 16)
34693 if (!BYTES_BIG_ENDIAN)
34694 elt = 15 - elt;
34695 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
34696 return true;
34699 if (elt % 2 == 0)
34701 for (i = 0; i < 16; i += 2)
34702 if (perm[i] != elt || perm[i + 1] != elt + 1)
34703 break;
34704 if (i == 16)
34706 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
34707 x = gen_reg_rtx (V8HImode);
34708 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
34709 GEN_INT (field)));
34710 emit_move_insn (target, gen_lowpart (V16QImode, x));
34711 return true;
34715 if (elt % 4 == 0)
34717 for (i = 0; i < 16; i += 4)
34718 if (perm[i] != elt
34719 || perm[i + 1] != elt + 1
34720 || perm[i + 2] != elt + 2
34721 || perm[i + 3] != elt + 3)
34722 break;
34723 if (i == 16)
34725 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
34726 x = gen_reg_rtx (V4SImode);
34727 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
34728 GEN_INT (field)));
34729 emit_move_insn (target, gen_lowpart (V16QImode, x));
34730 return true;
34735 /* Look for merge and pack patterns. */
34736 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
34738 bool swapped;
34740 if ((patterns[j].mask & rs6000_isa_flags) == 0)
34741 continue;
34743 elt = patterns[j].perm[0];
34744 if (perm[0] == elt)
34745 swapped = false;
34746 else if (perm[0] == elt + 16)
34747 swapped = true;
34748 else
34749 continue;
34750 for (i = 1; i < 16; ++i)
34752 elt = patterns[j].perm[i];
34753 if (swapped)
34754 elt = (elt >= 16 ? elt - 16 : elt + 16);
34755 else if (one_vec && elt >= 16)
34756 elt -= 16;
34757 if (perm[i] != elt)
34758 break;
34760 if (i == 16)
34762 enum insn_code icode = patterns[j].impl;
34763 machine_mode omode = insn_data[icode].operand[0].mode;
34764 machine_mode imode = insn_data[icode].operand[1].mode;
34766 /* For little-endian, don't use vpkuwum and vpkuhum if the
34767 underlying vector type is not V4SI and V8HI, respectively.
34768 For example, using vpkuwum with a V8HI picks up the even
34769 halfwords (BE numbering) when the even halfwords (LE
34770 numbering) are what we need. */
34771 if (!BYTES_BIG_ENDIAN
34772 && icode == CODE_FOR_altivec_vpkuwum_direct
34773 && ((GET_CODE (op0) == REG
34774 && GET_MODE (op0) != V4SImode)
34775 || (GET_CODE (op0) == SUBREG
34776 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
34777 continue;
34778 if (!BYTES_BIG_ENDIAN
34779 && icode == CODE_FOR_altivec_vpkuhum_direct
34780 && ((GET_CODE (op0) == REG
34781 && GET_MODE (op0) != V8HImode)
34782 || (GET_CODE (op0) == SUBREG
34783 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
34784 continue;
34786 /* For little-endian, the two input operands must be swapped
34787 (or swapped back) to ensure proper right-to-left numbering
34788 from 0 to 2N-1. */
34789 if (swapped ^ !BYTES_BIG_ENDIAN)
34790 std::swap (op0, op1);
34791 if (imode != V16QImode)
34793 op0 = gen_lowpart (imode, op0);
34794 op1 = gen_lowpart (imode, op1);
34796 if (omode == V16QImode)
34797 x = target;
34798 else
34799 x = gen_reg_rtx (omode);
34800 emit_insn (GEN_FCN (icode) (x, op0, op1));
34801 if (omode != V16QImode)
34802 emit_move_insn (target, gen_lowpart (V16QImode, x));
34803 return true;
34807 if (!BYTES_BIG_ENDIAN)
34809 altivec_expand_vec_perm_const_le (operands);
34810 return true;
34813 return false;
34816 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
34817 Return true if we match an efficient implementation. */
34819 static bool
34820 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
34821 unsigned char perm0, unsigned char perm1)
34823 rtx x;
34825 /* If both selectors come from the same operand, fold to single op. */
34826 if ((perm0 & 2) == (perm1 & 2))
34828 if (perm0 & 2)
34829 op0 = op1;
34830 else
34831 op1 = op0;
34833 /* If both operands are equal, fold to simpler permutation. */
34834 if (rtx_equal_p (op0, op1))
34836 perm0 = perm0 & 1;
34837 perm1 = (perm1 & 1) + 2;
34839 /* If the first selector comes from the second operand, swap. */
34840 else if (perm0 & 2)
34842 if (perm1 & 2)
34843 return false;
34844 perm0 -= 2;
34845 perm1 += 2;
34846 std::swap (op0, op1);
34848 /* If the second selector does not come from the second operand, fail. */
34849 else if ((perm1 & 2) == 0)
34850 return false;
34852 /* Success! */
34853 if (target != NULL)
34855 machine_mode vmode, dmode;
34856 rtvec v;
34858 vmode = GET_MODE (target);
34859 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
34860 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
34861 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
34862 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
34863 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
34864 emit_insn (gen_rtx_SET (target, x));
34866 return true;
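/* For example, after the canonicalization above, perm0 == 1 and
   perm1 == 2 select { op0[1], op1[0] }: the VEC_CONCAT forms the
   four-element vector op0[0], op0[1], op1[0], op1[1], and the
   VEC_SELECT picks elements 1 and 2 of it.  */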
34869 bool
34870 rs6000_expand_vec_perm_const (rtx operands[4])
34872 rtx target, op0, op1, sel;
34873 unsigned char perm0, perm1;
34875 target = operands[0];
34876 op0 = operands[1];
34877 op1 = operands[2];
34878 sel = operands[3];
34880 /* Unpack the constant selector. */
34881 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
34882 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
34884 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
34887 /* Test whether a constant permutation is supported. */
34889 static bool
34890 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
34891 const unsigned char *sel)
34893 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
34894 if (TARGET_ALTIVEC)
34895 return true;
34897 /* Check for ps_merge* or evmerge* insns. */
34898 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
34899 || (TARGET_SPE && vmode == V2SImode))
34901 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
34902 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
34903 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
34906 return false;
34909 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
34911 static void
34912 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
34913 machine_mode vmode, unsigned nelt, rtx perm[])
34915 machine_mode imode;
34916 rtx x;
34918 imode = vmode;
34919 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
34921 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
34922 imode = mode_for_vector (imode, nelt);
34925 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
34926 x = expand_vec_perm (vmode, op0, op1, x, target);
34927 if (x != target)
34928 emit_move_insn (target, x);
34931 /* Expand an extract even operation. */
34933 void
34934 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
34936 machine_mode vmode = GET_MODE (target);
34937 unsigned i, nelt = GET_MODE_NUNITS (vmode);
34938 rtx perm[16];
34940 for (i = 0; i < nelt; i++)
34941 perm[i] = GEN_INT (i * 2);
34943 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
34946 /* Expand a vector interleave operation. */
34948 void
34949 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
34951 machine_mode vmode = GET_MODE (target);
34952 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
34953 rtx perm[16];
34955 high = (highp ? 0 : nelt / 2);
34956 for (i = 0; i < nelt / 2; i++)
34958 perm[i * 2] = GEN_INT (i + high);
34959 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
34962 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
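/* For example, with V4SI and HIGHP true the selector built above is
   { 0, 4, 1, 5 }, pairing element i of OP0 with element i of OP1 from
   the high half; with HIGHP false it is { 2, 6, 3, 7 }.  */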
34965 /* Scale a V2DF vector SRC by two raised to the power SCALE, placing the result in TGT. */
34966 void
34967 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
34969 HOST_WIDE_INT hwi_scale (scale);
34970 REAL_VALUE_TYPE r_pow;
34971 rtvec v = rtvec_alloc (2);
34972 rtx elt;
34973 rtx scale_vec = gen_reg_rtx (V2DFmode);
34974 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
34975 elt = const_double_from_real_value (r_pow, DFmode);
34976 RTVEC_ELT (v, 0) = elt;
34977 RTVEC_ELT (v, 1) = elt;
34978 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
34979 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
34982 /* Return an RTX representing where to find the function value of a
34983 function returning MODE. */
34984 static rtx
34985 rs6000_complex_function_value (machine_mode mode)
34987 unsigned int regno;
34988 rtx r1, r2;
34989 machine_mode inner = GET_MODE_INNER (mode);
34990 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
34992 if (TARGET_FLOAT128
34993 && (mode == KCmode
34994 || (mode == TCmode && TARGET_IEEEQUAD)))
34995 regno = ALTIVEC_ARG_RETURN;
34997 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
34998 regno = FP_ARG_RETURN;
35000 else
35002 regno = GP_ARG_RETURN;
35004 /* 32-bit is OK since it'll go in r3/r4. */
35005 if (TARGET_32BIT && inner_bytes >= 4)
35006 return gen_rtx_REG (mode, regno);
35009 if (inner_bytes >= 8)
35010 return gen_rtx_REG (mode, regno);
35012 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35013 const0_rtx);
35014 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35015 GEN_INT (inner_bytes));
35016 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35019 /* Return an rtx describing a return value of MODE as a PARALLEL
35020 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35021 stride REG_STRIDE. */
35023 static rtx
35024 rs6000_parallel_return (machine_mode mode,
35025 int n_elts, machine_mode elt_mode,
35026 unsigned int regno, unsigned int reg_stride)
35028 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35030 int i;
35031 for (i = 0; i < n_elts; i++)
35033 rtx r = gen_rtx_REG (elt_mode, regno);
35034 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35035 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35036 regno += reg_stride;
35039 return par;
35042 /* Target hook for TARGET_FUNCTION_VALUE.
35044 On the SPE, both FPs and vectors are returned in r3.
35046 On RS/6000 an integer value is in r3 and a floating-point value is in
35047 fp1, unless -msoft-float. */
35049 static rtx
35050 rs6000_function_value (const_tree valtype,
35051 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35052 bool outgoing ATTRIBUTE_UNUSED)
35054 machine_mode mode;
35055 unsigned int regno;
35056 machine_mode elt_mode;
35057 int n_elts;
35059 /* Special handling for structs in darwin64. */
35060 if (TARGET_MACHO
35061 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35063 CUMULATIVE_ARGS valcum;
35064 rtx valret;
35066 valcum.words = 0;
35067 valcum.fregno = FP_ARG_MIN_REG;
35068 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35069 /* Do a trial code generation as if this were going to be passed as
35070 an argument; if any part goes in memory, we return NULL. */
35071 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35072 if (valret)
35073 return valret;
35074 /* Otherwise fall through to standard ABI rules. */
35077 mode = TYPE_MODE (valtype);
35079 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35080 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35082 int first_reg, n_regs;
35084 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35086 /* _Decimal128 must use even/odd register pairs. */
35087 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35088 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35090 else
35092 first_reg = ALTIVEC_ARG_RETURN;
35093 n_regs = 1;
35096 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35099 /* Some return value types need to be split when -mpowerpc64 is used with the 32-bit ABI. */
35100 if (TARGET_32BIT && TARGET_POWERPC64)
35101 switch (mode)
35103 default:
35104 break;
35105 case DImode:
35106 case SCmode:
35107 case DCmode:
35108 case TCmode:
35109 int count = GET_MODE_SIZE (mode) / 4;
35110 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35113 if ((INTEGRAL_TYPE_P (valtype)
35114 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35115 || POINTER_TYPE_P (valtype))
35116 mode = TARGET_32BIT ? SImode : DImode;
35118 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35119 /* _Decimal128 must use an even/odd register pair. */
35120 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35121 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS
35122 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35123 regno = FP_ARG_RETURN;
35124 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35125 && targetm.calls.split_complex_arg)
35126 return rs6000_complex_function_value (mode);
35127 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35128 return register is used in both cases, and we won't see V2DImode/V2DFmode
35129 for pure altivec, combine the two cases. */
35130 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35131 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35132 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35133 regno = ALTIVEC_ARG_RETURN;
35134 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35135 && (mode == DFmode || mode == DCmode
35136 || FLOAT128_IBM_P (mode) || mode == TCmode))
35137 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35138 else
35139 regno = GP_ARG_RETURN;
35141 return gen_rtx_REG (mode, regno);
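/* Illustrative summary of the rules above (a sketch, not exhaustive;
   assumes a 64-bit ELFv2 target with hardware floating point):

     int f (void);            returned in r3  (GP_ARG_RETURN)
     double g (void);         returned in f1  (FP_ARG_RETURN)
     vector int h (void);     returned in v2  (ALTIVEC_ARG_RETURN)
     _Decimal128 d (void);    returned in f2/f3 (even/odd FPR pair)  */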
35144 /* Define how to find the value returned by a library function
35145 assuming the value has mode MODE. */
35146 rtx
35147 rs6000_libcall_value (machine_mode mode)
35149 unsigned int regno;
35151 /* A long long return value needs to be split when -mpowerpc64 is used with the 32-bit ABI. */
35152 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35153 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35155 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
35156 /* _Decimal128 must use an even/odd register pair. */
35157 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35158 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35159 && TARGET_HARD_FLOAT && TARGET_FPRS
35160 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35161 regno = FP_ARG_RETURN;
35162 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35163 return register is used in both cases, and we won't see V2DImode/V2DFmode
35164 for pure altivec, combine the two cases. */
35165 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35166 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35167 regno = ALTIVEC_ARG_RETURN;
35168 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35169 return rs6000_complex_function_value (mode);
35170 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
35171 && (mode == DFmode || mode == DCmode
35172 || FLOAT128_IBM_P (mode) || mode == TCmode))
35173 return spe_build_register_parallel (mode, GP_ARG_RETURN);
35174 else
35175 regno = GP_ARG_RETURN;
35177 return gen_rtx_REG (mode, regno);
35181 /* Return true if we use LRA instead of reload pass. */
35182 static bool
35183 rs6000_lra_p (void)
35185 return TARGET_LRA;
35188 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35189 Frame pointer elimination is automatically handled.
35191 For the RS/6000, if frame pointer elimination is being done, we would like
35192 to convert ap into fp, not sp.
35194 We need r30 if -mminimal-toc was specified, and there are constant pool
35195 references. */
35197 static bool
35198 rs6000_can_eliminate (const int from, const int to)
35200 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35201 ? ! frame_pointer_needed
35202 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35203 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
35204 : true);
35207 /* Define the offset between two registers, FROM to be eliminated and its
35208 replacement TO, at the start of a routine. */
35209 HOST_WIDE_INT
35210 rs6000_initial_elimination_offset (int from, int to)
35212 rs6000_stack_t *info = rs6000_stack_info ();
35213 HOST_WIDE_INT offset;
35215 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35216 offset = info->push_p ? 0 : -info->total_size;
35217 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35219 offset = info->push_p ? 0 : -info->total_size;
35220 if (FRAME_GROWS_DOWNWARD)
35221 offset += info->fixed_size + info->vars_size + info->parm_size;
35223 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35224 offset = FRAME_GROWS_DOWNWARD
35225 ? info->fixed_size + info->vars_size + info->parm_size
35226 : 0;
35227 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35228 offset = info->total_size;
35229 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35230 offset = info->push_p ? info->total_size : 0;
35231 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35232 offset = 0;
35233 else
35234 gcc_unreachable ();
35236 return offset;
35239 static rtx
35240 rs6000_dwarf_register_span (rtx reg)
35242 rtx parts[8];
35243 int i, words;
35244 unsigned regno = REGNO (reg);
35245 machine_mode mode = GET_MODE (reg);
35247 if (TARGET_SPE
35248 && regno < 32
35249 && (SPE_VECTOR_MODE (GET_MODE (reg))
35250 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
35251 && mode != SFmode && mode != SDmode && mode != SCmode)))
35252 ;
35253 else
35254 return NULL_RTX;
35256 regno = REGNO (reg);
35258 /* The duality of the SPE register size wreaks all kinds of havoc.
35259 This is a way of distinguishing r0 in 32-bits from r0 in
35260 64-bits. */
35261 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
35262 gcc_assert (words <= 4);
35263 for (i = 0; i < words; i++, regno++)
35265 if (BYTES_BIG_ENDIAN)
35267 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
35268 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
35270 else
35272 parts[2 * i] = gen_rtx_REG (SImode, regno);
35273 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
35277 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
35280 /* Fill in sizes for SPE register high parts in table used by unwinder. */
35282 static void
35283 rs6000_init_dwarf_reg_sizes_extra (tree address)
35285 if (TARGET_SPE)
35287 int i;
35288 machine_mode mode = TYPE_MODE (char_type_node);
35289 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35290 rtx mem = gen_rtx_MEM (BLKmode, addr);
35291 rtx value = gen_int_mode (4, mode);
35293 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
35295 int column = DWARF_REG_TO_UNWIND_COLUMN
35296 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35297 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35299 emit_move_insn (adjust_address (mem, mode, offset), value);
35303 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35305 int i;
35306 machine_mode mode = TYPE_MODE (char_type_node);
35307 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35308 rtx mem = gen_rtx_MEM (BLKmode, addr);
35309 rtx value = gen_int_mode (16, mode);
35311 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35312 The unwinder still needs to know the size of Altivec registers. */
35314 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35316 int column = DWARF_REG_TO_UNWIND_COLUMN
35317 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35318 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35320 emit_move_insn (adjust_address (mem, mode, offset), value);
35325 /* Map internal gcc register numbers to debug format register numbers.
35326 FORMAT specifies the type of debug register number to use:
35327 0 -- debug information, except for frame-related sections
35328 1 -- DWARF .debug_frame section
35329 2 -- DWARF .eh_frame section */
35331 unsigned int
35332 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35334 /* We never use the GCC internal number for SPE high registers.
35335 Those are mapped to the 1200..1231 range for all debug formats. */
35336 if (SPE_HIGH_REGNO_P (regno))
35337 return regno - FIRST_SPE_HIGH_REGNO + 1200;
35339 /* Except for the above, we use the internal number for non-DWARF
35340 debug information, and also for .eh_frame. */
35341 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35342 return regno;
35344 /* On some platforms, we use the standard DWARF register
35345 numbering for .debug_info and .debug_frame. */
35346 #ifdef RS6000_USE_DWARF_NUMBERING
35347 if (regno <= 63)
35348 return regno;
35349 if (regno == LR_REGNO)
35350 return 108;
35351 if (regno == CTR_REGNO)
35352 return 109;
35353 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35354 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35355 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35356 to the DWARF reg for CR. */
35357 if (format == 1 && regno == CR2_REGNO)
35358 return 64;
35359 if (CR_REGNO_P (regno))
35360 return regno - CR0_REGNO + 86;
35361 if (regno == CA_REGNO)
35362 return 101; /* XER */
35363 if (ALTIVEC_REGNO_P (regno))
35364 return regno - FIRST_ALTIVEC_REGNO + 1124;
35365 if (regno == VRSAVE_REGNO)
35366 return 356;
35367 if (regno == VSCR_REGNO)
35368 return 67;
35369 if (regno == SPE_ACC_REGNO)
35370 return 99;
35371 if (regno == SPEFSCR_REGNO)
35372 return 612;
35373 #endif
35374 return regno;
35377 /* target hook eh_return_filter_mode */
35378 static machine_mode
35379 rs6000_eh_return_filter_mode (void)
35381 return TARGET_32BIT ? SImode : word_mode;
35384 /* Target hook for scalar_mode_supported_p. */
35385 static bool
35386 rs6000_scalar_mode_supported_p (machine_mode mode)
35388 /* -m32 does not support TImode. This is the default, from
35389 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35390 same ABI as for -m32. But default_scalar_mode_supported_p allows
35391 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35392 for -mpowerpc64. */
35393 if (TARGET_32BIT && mode == TImode)
35394 return false;
35396 if (DECIMAL_FLOAT_MODE_P (mode))
35397 return default_decimal_float_supported_p ();
35398 else if (TARGET_FLOAT128 && (mode == KFmode || mode == IFmode))
35399 return true;
35400 else
35401 return default_scalar_mode_supported_p (mode);
35404 /* Target hook for vector_mode_supported_p. */
35405 static bool
35406 rs6000_vector_mode_supported_p (machine_mode mode)
35409 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
35410 return true;
35412 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
35413 return true;
35415 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35416 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35417 double-double. */
35418 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35419 return true;
35421 else
35422 return false;
35425 /* Target hook for c_mode_for_suffix. */
35426 static machine_mode
35427 rs6000_c_mode_for_suffix (char suffix)
35429 if (TARGET_FLOAT128)
35431 if (suffix == 'q' || suffix == 'Q')
35432 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35434 /* At the moment, we are not defining a suffix for IBM extended double.
35435 If/when the default for -mabi=ieeelongdouble is changed, and we want
35436 to support __ibm128 constants in legacy library code, we may need to
35437 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
35438 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35439 __float80 constants. */
35442 return VOIDmode;
35445 /* Target hook for invalid_arg_for_unprototyped_fn. */
35446 static const char *
35447 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35449 return (!rs6000_darwin64_abi
35450 && typelist == 0
35451 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35452 && (funcdecl == NULL_TREE
35453 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35454 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35455 ? N_("AltiVec argument passed to unprototyped function")
35456 : NULL;
35459 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
35460 setup by using __stack_chk_fail_local hidden function instead of
35461 calling __stack_chk_fail directly. Otherwise it is better to call
35462 __stack_chk_fail directly. */
35464 static tree ATTRIBUTE_UNUSED
35465 rs6000_stack_protect_fail (void)
35467 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35468 ? default_hidden_stack_protect_fail ()
35469 : default_external_stack_protect_fail ();
35472 void
35473 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
35474 int num_operands ATTRIBUTE_UNUSED)
35476 if (rs6000_warn_cell_microcode)
35478 const char *temp;
35479 int insn_code_number = recog_memoized (insn);
35480 location_t location = INSN_LOCATION (insn);
35482 /* Punt on insns we cannot recognize. */
35483 if (insn_code_number < 0)
35484 return;
35486 temp = get_insn_template (insn_code_number, insn);
35488 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
35489 warning_at (location, OPT_mwarn_cell_microcode,
35490 "emitting microcode insn %s\t[%s] #%d",
35491 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
35492 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
35493 warning_at (location, OPT_mwarn_cell_microcode,
35494 "emitting conditional microcode insn %s\t[%s] #%d",
35495 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
35499 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35501 #if TARGET_ELF
35502 static unsigned HOST_WIDE_INT
35503 rs6000_asan_shadow_offset (void)
35505 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35507 #endif
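/* Sketch of how the offset is consumed (standard AddressSanitizer
   mapping, not code from this file): one shadow byte covers eight
   application bytes, so shadow = (addr >> 3) + offset:  */

static unsigned long long
asan_shadow_addr_model (unsigned long long addr, int is_64bit)
{
  unsigned long long offset = 1ULL << (is_64bit ? 41 : 29);
  return (addr >> 3) + offset;          /* matches the hook above */
}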
35509 /* Mask options that we want to support inside of attribute((target)) and
35510 #pragma GCC target operations. Note, we do not include things like
35511 64/32-bit, endianness, hard/soft floating point, etc. that would have
35512 different calling sequences. */
35514 struct rs6000_opt_mask {
35515 const char *name; /* option name */
35516 HOST_WIDE_INT mask; /* mask to set */
35517 bool invert; /* invert sense of mask */
35518 bool valid_target; /* option is a target option */
35521 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35523 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35524 { "cmpb", OPTION_MASK_CMPB, false, true },
35525 { "crypto", OPTION_MASK_CRYPTO, false, true },
35526 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35527 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35528 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35529 false, true },
35530 { "float128", OPTION_MASK_FLOAT128, false, false },
35531 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
35532 { "fprnd", OPTION_MASK_FPRND, false, true },
35533 { "hard-dfp", OPTION_MASK_DFP, false, true },
35534 { "htm", OPTION_MASK_HTM, false, true },
35535 { "isel", OPTION_MASK_ISEL, false, true },
35536 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35537 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35538 { "modulo", OPTION_MASK_MODULO, false, true },
35539 { "mulhw", OPTION_MASK_MULHW, false, true },
35540 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35541 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35542 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35543 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35544 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35545 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35546 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
35547 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
35548 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35549 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35550 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35551 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35552 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35553 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35554 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35555 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35556 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35557 { "string", OPTION_MASK_STRING, false, true },
35558 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
35559 { "update", OPTION_MASK_NO_UPDATE, true , true },
35560 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
35561 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
35562 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
35563 { "vsx", OPTION_MASK_VSX, false, true },
35564 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
35565 #ifdef OPTION_MASK_64BIT
35566 #if TARGET_AIX_OS
35567 { "aix64", OPTION_MASK_64BIT, false, false },
35568 { "aix32", OPTION_MASK_64BIT, true, false },
35569 #else
35570 { "64", OPTION_MASK_64BIT, false, false },
35571 { "32", OPTION_MASK_64BIT, true, false },
35572 #endif
35573 #endif
35574 #ifdef OPTION_MASK_EABI
35575 { "eabi", OPTION_MASK_EABI, false, false },
35576 #endif
35577 #ifdef OPTION_MASK_LITTLE_ENDIAN
35578 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
35579 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
35580 #endif
35581 #ifdef OPTION_MASK_RELOCATABLE
35582 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
35583 #endif
35584 #ifdef OPTION_MASK_STRICT_ALIGN
35585 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
35586 #endif
35587 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
35588 { "string", OPTION_MASK_STRING, false, false },
35591 /* Builtin mask mapping for printing the flags. */
35592 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
35594 { "altivec", RS6000_BTM_ALTIVEC, false, false },
35595 { "vsx", RS6000_BTM_VSX, false, false },
35596 { "spe", RS6000_BTM_SPE, false, false },
35597 { "paired", RS6000_BTM_PAIRED, false, false },
35598 { "fre", RS6000_BTM_FRE, false, false },
35599 { "fres", RS6000_BTM_FRES, false, false },
35600 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
35601 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
35602 { "popcntd", RS6000_BTM_POPCNTD, false, false },
35603 { "cell", RS6000_BTM_CELL, false, false },
35604 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
35605 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
35606 { "crypto", RS6000_BTM_CRYPTO, false, false },
35607 { "htm", RS6000_BTM_HTM, false, false },
35608 { "hard-dfp", RS6000_BTM_DFP, false, false },
35609 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
35610 { "long-double-128", RS6000_BTM_LDBL128, false, false },
35611 { "float128", RS6000_BTM_FLOAT128, false, false },
35614 /* Option variables that we want to support inside attribute((target)) and
35615 #pragma GCC target operations. */
35617 struct rs6000_opt_var {
35618 const char *name; /* option name */
35619 size_t global_offset; /* offset of the option in global_options. */
35620 size_t target_offset; /* offset of the option in target options. */
35623 static struct rs6000_opt_var const rs6000_opt_vars[] =
35625 { "friz",
35626 offsetof (struct gcc_options, x_TARGET_FRIZ),
35627 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
35628 { "avoid-indexed-addresses",
35629 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
35630 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
35631 { "paired",
35632 offsetof (struct gcc_options, x_rs6000_paired_float),
35633 offsetof (struct cl_target_option, x_rs6000_paired_float), },
35634 { "longcall",
35635 offsetof (struct gcc_options, x_rs6000_default_long_calls),
35636 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
35637 { "optimize-swaps",
35638 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
35639 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
35640 { "allow-movmisalign",
35641 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
35642 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
35643 { "allow-df-permute",
35644 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
35645 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
35646 { "sched-groups",
35647 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
35648 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
35649 { "always-hint",
35650 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
35651 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
35652 { "align-branch-targets",
35653 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
35654 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
35655 { "vectorize-builtins",
35656 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
35657 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
35658 { "tls-markers",
35659 offsetof (struct gcc_options, x_tls_markers),
35660 offsetof (struct cl_target_option, x_tls_markers), },
35661 { "sched-prolog",
35662 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35663 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35664 { "sched-epilog",
35665 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35666 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35667 { "gen-cell-microcode",
35668 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
35669 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
35670 { "warn-cell-microcode",
35671 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
35672 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
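/* Usage sketch (user-level code, not part of this file): the two tables
   above define the names accepted by the attribute/pragma parser below,
   so a declaration such as the following flips per-function ISA flags:  */

__attribute__((__target__ ("vsx,no-crypto")))
static void example_vsx_fn (void)
{
  /* Body is compiled as if -mvsx -mno-crypto were on the command line
     (VSX also implies altivec, handled in the parser below).  */
}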
35675 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
35676 parsing. Return true if there were no errors. */
35678 static bool
35679 rs6000_inner_target_options (tree args, bool attr_p)
35681 bool ret = true;
35683 if (args == NULL_TREE)
35686 else if (TREE_CODE (args) == STRING_CST)
35688 char *p = ASTRDUP (TREE_STRING_POINTER (args));
35689 char *q;
35691 while ((q = strtok (p, ",")) != NULL)
35693 bool error_p = false;
35694 bool not_valid_p = false;
35695 const char *cpu_opt = NULL;
35697 p = NULL;
35698 if (strncmp (q, "cpu=", 4) == 0)
35700 int cpu_index = rs6000_cpu_name_lookup (q+4);
35701 if (cpu_index >= 0)
35702 rs6000_cpu_index = cpu_index;
35703 else
35705 error_p = true;
35706 cpu_opt = q+4;
35709 else if (strncmp (q, "tune=", 5) == 0)
35711 int tune_index = rs6000_cpu_name_lookup (q+5);
35712 if (tune_index >= 0)
35713 rs6000_tune_index = tune_index;
35714 else
35716 error_p = true;
35717 cpu_opt = q+5;
35720 else
35722 size_t i;
35723 bool invert = false;
35724 char *r = q;
35726 error_p = true;
35727 if (strncmp (r, "no-", 3) == 0)
35729 invert = true;
35730 r += 3;
35733 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
35734 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
35736 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
35738 if (!rs6000_opt_masks[i].valid_target)
35739 not_valid_p = true;
35740 else
35742 error_p = false;
35743 rs6000_isa_flags_explicit |= mask;
35745 /* VSX needs altivec, so -mvsx automagically sets
35746 altivec and disables -mavoid-indexed-addresses. */
35747 if (!invert)
35749 if (mask == OPTION_MASK_VSX)
35751 mask |= OPTION_MASK_ALTIVEC;
35752 TARGET_AVOID_XFORM = 0;
35756 if (rs6000_opt_masks[i].invert)
35757 invert = !invert;
35759 if (invert)
35760 rs6000_isa_flags &= ~mask;
35761 else
35762 rs6000_isa_flags |= mask;
35764 break;
35767 if (error_p && !not_valid_p)
35769 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
35770 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
35772 size_t j = rs6000_opt_vars[i].global_offset;
35773 *((int *) ((char *)&global_options + j)) = !invert;
35774 error_p = false;
35775 not_valid_p = false;
35776 break;
35781 if (error_p)
35783 const char *eprefix, *esuffix;
35785 ret = false;
35786 if (attr_p)
35788 eprefix = "__attribute__((__target__(";
35789 esuffix = ")))";
35791 else
35793 eprefix = "#pragma GCC target ";
35794 esuffix = "";
35797 if (cpu_opt)
35798 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
35799 q, esuffix);
35800 else if (not_valid_p)
35801 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
35802 else
35803 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
35808 else if (TREE_CODE (args) == TREE_LIST)
35810 do
35812 tree value = TREE_VALUE (args);
35813 if (value)
35815 bool ret2 = rs6000_inner_target_options (value, attr_p);
35816 if (!ret2)
35817 ret = false;
35819 args = TREE_CHAIN (args);
35821 while (args != NULL_TREE);
35824 else
35825 gcc_unreachable ();
35827 return ret;
35830 /* Print out the target options as a list for -mdebug=target. */
35832 static void
35833 rs6000_debug_target_options (tree args, const char *prefix)
35835 if (args == NULL_TREE)
35836 fprintf (stderr, "%s<NULL>", prefix);
35838 else if (TREE_CODE (args) == STRING_CST)
35840 char *p = ASTRDUP (TREE_STRING_POINTER (args));
35841 char *q;
35843 while ((q = strtok (p, ",")) != NULL)
35845 p = NULL;
35846 fprintf (stderr, "%s\"%s\"", prefix, q);
35847 prefix = ", ";
35851 else if (TREE_CODE (args) == TREE_LIST)
35853 do
35855 tree value = TREE_VALUE (args);
35856 if (value)
35858 rs6000_debug_target_options (value, prefix);
35859 prefix = ", ";
35861 args = TREE_CHAIN (args);
35863 while (args != NULL_TREE);
35866 else
35867 gcc_unreachable ();
35869 return;
35873 /* Hook to validate attribute((target("..."))). */
35875 static bool
35876 rs6000_valid_attribute_p (tree fndecl,
35877 tree ARG_UNUSED (name),
35878 tree args,
35879 int flags)
35881 struct cl_target_option cur_target;
35882 bool ret;
35883 tree old_optimize = build_optimization_node (&global_options);
35884 tree new_target, new_optimize;
35885 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
35887 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
35889 if (TARGET_DEBUG_TARGET)
35891 tree tname = DECL_NAME (fndecl);
35892 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
35893 if (tname)
35894 fprintf (stderr, "function: %.*s\n",
35895 (int) IDENTIFIER_LENGTH (tname),
35896 IDENTIFIER_POINTER (tname));
35897 else
35898 fprintf (stderr, "function: unknown\n");
35900 fprintf (stderr, "args:");
35901 rs6000_debug_target_options (args, " ");
35902 fprintf (stderr, "\n");
35904 if (flags)
35905 fprintf (stderr, "flags: 0x%x\n", flags);
35907 fprintf (stderr, "--------------------\n");
35910 old_optimize = build_optimization_node (&global_options);
35911 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
35913 /* If the function changed the optimization levels as well as setting target
35914 options, start with the optimizations specified. */
35915 if (func_optimize && func_optimize != old_optimize)
35916 cl_optimization_restore (&global_options,
35917 TREE_OPTIMIZATION (func_optimize));
35919 /* The target attributes may also change some optimization flags, so update
35920 the optimization options if necessary. */
35921 cl_target_option_save (&cur_target, &global_options);
35922 rs6000_cpu_index = rs6000_tune_index = -1;
35923 ret = rs6000_inner_target_options (args, true);
35925 /* Set up any additional state. */
35926 if (ret)
35928 ret = rs6000_option_override_internal (false);
35929 new_target = build_target_option_node (&global_options);
35931 else
35932 new_target = NULL;
35934 new_optimize = build_optimization_node (&global_options);
35936 if (!new_target)
35937 ret = false;
35939 else if (fndecl)
35941 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
35943 if (old_optimize != new_optimize)
35944 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
35947 cl_target_option_restore (&global_options, &cur_target);
35949 if (old_optimize != new_optimize)
35950 cl_optimization_restore (&global_options,
35951 TREE_OPTIMIZATION (old_optimize));
35953 return ret;
35957 /* Hook to validate the current #pragma GCC target and set the state, and
35958 update the macros based on what was changed. If ARGS is NULL, then
35959 POP_TARGET is used to reset the options. */
35961 bool
35962 rs6000_pragma_target_parse (tree args, tree pop_target)
35964 tree prev_tree = build_target_option_node (&global_options);
35965 tree cur_tree;
35966 struct cl_target_option *prev_opt, *cur_opt;
35967 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
35968 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
35970 if (TARGET_DEBUG_TARGET)
35972 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
35973 fprintf (stderr, "args:");
35974 rs6000_debug_target_options (args, " ");
35975 fprintf (stderr, "\n");
35977 if (pop_target)
35979 fprintf (stderr, "pop_target:\n");
35980 debug_tree (pop_target);
35982 else
35983 fprintf (stderr, "pop_target: <NULL>\n");
35985 fprintf (stderr, "--------------------\n");
35988 if (! args)
35990 cur_tree = ((pop_target)
35991 ? pop_target
35992 : target_option_default_node);
35993 cl_target_option_restore (&global_options,
35994 TREE_TARGET_OPTION (cur_tree));
35996 else
35998 rs6000_cpu_index = rs6000_tune_index = -1;
35999 if (!rs6000_inner_target_options (args, false)
36000 || !rs6000_option_override_internal (false)
36001 || (cur_tree = build_target_option_node (&global_options))
36002 == NULL_TREE)
36004 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36005 fprintf (stderr, "invalid pragma\n");
36007 return false;
36011 target_option_current_node = cur_tree;
36013 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36014 change the macros that are defined. */
36015 if (rs6000_target_modify_macros_ptr)
36017 prev_opt = TREE_TARGET_OPTION (prev_tree);
36018 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36019 prev_flags = prev_opt->x_rs6000_isa_flags;
36021 cur_opt = TREE_TARGET_OPTION (cur_tree);
36022 cur_flags = cur_opt->x_rs6000_isa_flags;
36023 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36025 diff_bumask = (prev_bumask ^ cur_bumask);
36026 diff_flags = (prev_flags ^ cur_flags);
36028 if ((diff_flags != 0) || (diff_bumask != 0))
36030 /* Delete old macros. */
36031 rs6000_target_modify_macros_ptr (false,
36032 prev_flags & diff_flags,
36033 prev_bumask & diff_bumask);
36035 /* Define new macros. */
36036 rs6000_target_modify_macros_ptr (true,
36037 cur_flags & diff_flags,
36038 cur_bumask & diff_bumask);
36042 return true;
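/* Usage sketch (user-level code, not part of this file): the pragma forms
   that drive this hook.  pop_options reaches the ARGS == NULL path with
   the saved state passed as POP_TARGET:

     #pragma GCC push_options
     #pragma GCC target ("altivec")
     void altivec_fn (void);            -- options parsed by this hook
     #pragma GCC pop_options            -- restores the previous state  */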
36046 /* Remember the last target of rs6000_set_current_function. */
36047 static GTY(()) tree rs6000_previous_fndecl;
36049 /* Establish appropriate back-end context for processing the function
36050 FNDECL. The argument might be NULL to indicate processing at top
36051 level, outside of any function scope. */
36052 static void
36053 rs6000_set_current_function (tree fndecl)
36055 tree old_tree = (rs6000_previous_fndecl
36056 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
36057 : NULL_TREE);
36059 tree new_tree = (fndecl
36060 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
36061 : NULL_TREE);
36063 if (TARGET_DEBUG_TARGET)
36065 bool print_final = false;
36066 fprintf (stderr, "\n==================== rs6000_set_current_function");
36068 if (fndecl)
36069 fprintf (stderr, ", fndecl %s (%p)",
36070 (DECL_NAME (fndecl)
36071 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36072 : "<unknown>"), (void *)fndecl);
36074 if (rs6000_previous_fndecl)
36075 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36077 fprintf (stderr, "\n");
36078 if (new_tree)
36080 fprintf (stderr, "\nnew fndecl target specific options:\n");
36081 debug_tree (new_tree);
36082 print_final = true;
36085 if (old_tree)
36087 fprintf (stderr, "\nold fndecl target specific options:\n");
36088 debug_tree (old_tree);
36089 print_final = true;
36092 if (print_final)
36093 fprintf (stderr, "--------------------\n");
36096 /* Only change the context if the function changes. This hook is called
36097 several times in the course of compiling a function, and we don't want to
36098 slow things down too much or call target_reinit when it isn't safe. */
36099 if (fndecl && fndecl != rs6000_previous_fndecl)
36101 rs6000_previous_fndecl = fndecl;
36102 if (old_tree == new_tree)
36103 ;
36105 else if (new_tree && new_tree != target_option_default_node)
36107 cl_target_option_restore (&global_options,
36108 TREE_TARGET_OPTION (new_tree));
36109 if (TREE_TARGET_GLOBALS (new_tree))
36110 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36111 else
36112 TREE_TARGET_GLOBALS (new_tree)
36113 = save_target_globals_default_opts ();
36116 else if (old_tree && old_tree != target_option_default_node)
36118 new_tree = target_option_current_node;
36119 cl_target_option_restore (&global_options,
36120 TREE_TARGET_OPTION (new_tree));
36121 if (TREE_TARGET_GLOBALS (new_tree))
36122 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36123 else if (new_tree == target_option_default_node)
36124 restore_target_globals (&default_target_globals);
36125 else
36126 TREE_TARGET_GLOBALS (new_tree)
36127 = save_target_globals_default_opts ();
36133 /* Save the current options */
36135 static void
36136 rs6000_function_specific_save (struct cl_target_option *ptr,
36137 struct gcc_options *opts)
36139 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36140 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36143 /* Restore the current options */
36145 static void
36146 rs6000_function_specific_restore (struct gcc_options *opts,
36147 struct cl_target_option *ptr)
36150 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36151 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36152 (void) rs6000_option_override_internal (false);
36155 /* Print the current options */
36157 static void
36158 rs6000_function_specific_print (FILE *file, int indent,
36159 struct cl_target_option *ptr)
36161 rs6000_print_isa_options (file, indent, "Isa options set",
36162 ptr->x_rs6000_isa_flags);
36164 rs6000_print_isa_options (file, indent, "Isa options explicit",
36165 ptr->x_rs6000_isa_flags_explicit);
36168 /* Helper function to print the current isa or misc options on a line. */
36170 static void
36171 rs6000_print_options_internal (FILE *file,
36172 int indent,
36173 const char *string,
36174 HOST_WIDE_INT flags,
36175 const char *prefix,
36176 const struct rs6000_opt_mask *opts,
36177 size_t num_elements)
36179 size_t i;
36180 size_t start_column = 0;
36181 size_t cur_column;
36182 size_t max_column = 120;
36183 size_t prefix_len = strlen (prefix);
36184 size_t comma_len = 0;
36185 const char *comma = "";
36187 if (indent)
36188 start_column += fprintf (file, "%*s", indent, "");
36190 if (!flags)
36192 fprintf (file, DEBUG_FMT_S, string, "<none>");
36193 return;
36196 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36198 /* Print the various mask options. */
36199 cur_column = start_column;
36200 for (i = 0; i < num_elements; i++)
36202 bool invert = opts[i].invert;
36203 const char *name = opts[i].name;
36204 const char *no_str = "";
36205 HOST_WIDE_INT mask = opts[i].mask;
36206 size_t len = comma_len + prefix_len + strlen (name);
36208 if (!invert)
36210 if ((flags & mask) == 0)
36212 no_str = "no-";
36213 len += sizeof ("no-") - 1;
36216 flags &= ~mask;
36219 else
36221 if ((flags & mask) != 0)
36223 no_str = "no-";
36224 len += sizeof ("no-") - 1;
36227 flags |= mask;
36230 cur_column += len;
36231 if (cur_column > max_column)
36233 fprintf (file, ", \\\n%*s", (int)start_column, "");
36234 cur_column = start_column + len;
36235 comma = "";
36238 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36239 comma = ", ";
36240 comma_len = sizeof (", ") - 1;
36243 fputs ("\n", file);
36246 /* Helper function to print the current isa options on a line. */
36248 static void
36249 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36250 HOST_WIDE_INT flags)
36252 rs6000_print_options_internal (file, indent, string, flags, "-m",
36253 &rs6000_opt_masks[0],
36254 ARRAY_SIZE (rs6000_opt_masks));
36257 static void
36258 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36259 HOST_WIDE_INT flags)
36261 rs6000_print_options_internal (file, indent, string, flags, "",
36262 &rs6000_builtin_mask_names[0],
36263 ARRAY_SIZE (rs6000_builtin_mask_names));
36267 /* Hook to determine if one function can safely inline another. */
36269 static bool
36270 rs6000_can_inline_p (tree caller, tree callee)
36272 bool ret = false;
36273 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
36274 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
36276 /* If callee has no option attributes, then it is ok to inline. */
36277 if (!callee_tree)
36278 ret = true;
36280 /* If caller has no option attributes, but callee does then it is not ok to
36281 inline. */
36282 else if (!caller_tree)
36283 ret = false;
36285 else
36287 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
36288 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
36290 /* Callee's options should be a subset of the caller's, i.e. a vsx function
36291 can inline an altivec function but a non-vsx function can't inline a
36292 vsx function. */
36293 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
36294 == callee_opts->x_rs6000_isa_flags)
36295 ret = true;
36298 if (TARGET_DEBUG_TARGET)
36299 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
36300 (DECL_NAME (caller)
36301 ? IDENTIFIER_POINTER (DECL_NAME (caller))
36302 : "<unknown>"),
36303 (DECL_NAME (callee)
36304 ? IDENTIFIER_POINTER (DECL_NAME (callee))
36305 : "<unknown>"),
36306 (ret ? "can" : "cannot"));
36308 return ret;
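/* A minimal model of the subset test above (illustrative only): inlining
   is allowed when every ISA flag the callee requires is also enabled for
   the caller.  */

static int
isa_flags_subset_p (unsigned long long caller_flags,
                    unsigned long long callee_flags)
{
  return (caller_flags & callee_flags) == callee_flags;
}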
36311 /* Allocate a stack temp and fix up the address so it meets the particular
36312 memory requirements (either offsettable or REG+REG addressing). */
36314 rtx
36315 rs6000_allocate_stack_temp (machine_mode mode,
36316 bool offsettable_p,
36317 bool reg_reg_p)
36319 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
36320 rtx addr = XEXP (stack, 0);
36321 int strict_p = (reload_in_progress || reload_completed);
36323 if (!legitimate_indirect_address_p (addr, strict_p))
36325 if (offsettable_p
36326 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
36327 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
36329 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
36330 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
36333 return stack;
36336 /* Given a memory reference, if it does not use reg or reg+reg addressing, convert
36337 to such a form to deal with memory reference instructions like STFIWX that
36338 only take reg+reg addressing. */
36340 rtx
36341 rs6000_address_for_fpconvert (rtx x)
36343 int strict_p = (reload_in_progress || reload_completed);
36344 rtx addr;
36346 gcc_assert (MEM_P (x));
36347 addr = XEXP (x, 0);
36348 if (! legitimate_indirect_address_p (addr, strict_p)
36349 && ! legitimate_indexed_address_p (addr, strict_p))
36351 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
36353 rtx reg = XEXP (addr, 0);
36354 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
36355 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
36356 gcc_assert (REG_P (reg));
36357 emit_insn (gen_add3_insn (reg, reg, size_rtx));
36358 addr = reg;
36360 else if (GET_CODE (addr) == PRE_MODIFY)
36362 rtx reg = XEXP (addr, 0);
36363 rtx expr = XEXP (addr, 1);
36364 gcc_assert (REG_P (reg));
36365 gcc_assert (GET_CODE (expr) == PLUS);
36366 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
36367 addr = reg;
36370 x = replace_equiv_address (x, copy_addr_to_reg (addr));
36373 return x;
36376 /* Given a memory reference, if it is not in the form for altivec memory
36377 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
36378 convert to the altivec format. */
36380 rtx
36381 rs6000_address_for_altivec (rtx x)
36383 gcc_assert (MEM_P (x));
36384 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
36386 rtx addr = XEXP (x, 0);
36387 int strict_p = (reload_in_progress || reload_completed);
36389 if (!legitimate_indexed_address_p (addr, strict_p)
36390 && !legitimate_indirect_address_p (addr, strict_p))
36391 addr = copy_to_mode_reg (Pmode, addr);
36393 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
36394 x = change_address (x, GET_MODE (x), addr);
36397 return x;
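/* Sketch (illustrative only): the AND with -16 mirrors the VMX hardware
   behavior of ignoring the low four address bits, i.e. rounding the
   effective address down to a 16-byte boundary.  */

static unsigned long long
altivec_effective_addr_model (unsigned long long addr)
{
  return addr & ~15ULL;                 /* same value as addr & -16 */
}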
36400 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
36402 On the RS/6000, all integer constants are acceptable, most won't be valid
36403 for particular insns, though. Only easy FP constants are acceptable. */
36405 static bool
36406 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
36408 if (TARGET_ELF && tls_referenced_p (x))
36409 return false;
36411 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
36412 || GET_MODE (x) == VOIDmode
36413 || (TARGET_POWERPC64 && mode == DImode)
36414 || easy_fp_constant (x, mode)
36415 || easy_vector_constant (x, mode));
36419 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
36421 static bool
36422 chain_already_loaded (rtx_insn *last)
36424 for (; last != NULL; last = PREV_INSN (last))
36426 if (NONJUMP_INSN_P (last))
36428 rtx patt = PATTERN (last);
36430 if (GET_CODE (patt) == SET)
36432 rtx lhs = XEXP (patt, 0);
36434 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
36435 return true;
36439 return false;
36442 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
36444 void
36445 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
36447 const bool direct_call_p
36448 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
36449 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
36450 rtx toc_load = NULL_RTX;
36451 rtx toc_restore = NULL_RTX;
36452 rtx func_addr;
36453 rtx abi_reg = NULL_RTX;
36454 rtx call[4];
36455 int n_call;
36456 rtx insn;
36458 /* Handle longcall attributes. */
36459 if (INTVAL (cookie) & CALL_LONG)
36460 func_desc = rs6000_longcall_ref (func_desc);
36462 /* Handle indirect calls. */
36463 if (GET_CODE (func_desc) != SYMBOL_REF
36464 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
36466 /* Save the TOC into its reserved slot before the call,
36467 and prepare to restore it after the call. */
36468 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
36469 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
36470 rtx stack_toc_mem = gen_frame_mem (Pmode,
36471 gen_rtx_PLUS (Pmode, stack_ptr,
36472 stack_toc_offset));
36473 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
36474 gen_rtvec (1, stack_toc_offset),
36475 UNSPEC_TOCSLOT);
36476 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
36478 /* Can we optimize saving the TOC in the prologue or
36479 do we need to do it at every call? */
36480 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
36481 cfun->machine->save_toc_in_prologue = true;
36482 else
36484 MEM_VOLATILE_P (stack_toc_mem) = 1;
36485 emit_move_insn (stack_toc_mem, toc_reg);
36488 if (DEFAULT_ABI == ABI_ELFv2)
36490 /* A function pointer in the ELFv2 ABI is just a plain address, but
36491 the ABI requires it to be loaded into r12 before the call. */
36492 func_addr = gen_rtx_REG (Pmode, 12);
36493 emit_move_insn (func_addr, func_desc);
36494 abi_reg = func_addr;
36496 else
36498 /* A function pointer under AIX is a pointer to a data area whose
36499 first word contains the actual address of the function, whose
36500 second word contains a pointer to its TOC, and whose third word
36501 contains a value to place in the static chain register (r11).
36502 Note that if we load the static chain, our "trampoline" need
36503 not have any executable code. */
36505 /* Load up address of the actual function. */
36506 func_desc = force_reg (Pmode, func_desc);
36507 func_addr = gen_reg_rtx (Pmode);
36508 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
36510 /* Prepare to load the TOC of the called function. Note that the
36511 TOC load must happen immediately before the actual call so
36512 that unwinding the TOC registers works correctly. See the
36513 comment in frob_update_context. */
36514 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
36515 rtx func_toc_mem = gen_rtx_MEM (Pmode,
36516 gen_rtx_PLUS (Pmode, func_desc,
36517 func_toc_offset));
36518 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
36520 /* If we have a static chain, load it up. But, if the call was
36521 originally direct, the 3rd word has not been written since no
36522 trampoline has been built, so we ought not to load it, lest we
36523 overwrite a static chain value. */
36524 if (!direct_call_p
36525 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
36526 && !chain_already_loaded (get_current_sequence ()->next->last))
36528 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
36529 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
36530 rtx func_sc_mem = gen_rtx_MEM (Pmode,
36531 gen_rtx_PLUS (Pmode, func_desc,
36532 func_sc_offset));
36533 emit_move_insn (sc_reg, func_sc_mem);
36534 abi_reg = sc_reg;
36538 else
36540 /* Direct calls use the TOC: for local calls, the callee will
36541 assume the TOC register is set; for non-local calls, the
36542 PLT stub needs the TOC register. */
36543 abi_reg = toc_reg;
36544 func_addr = func_desc;
36547 /* Create the call. */
36548 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
36549 if (value != NULL_RTX)
36550 call[0] = gen_rtx_SET (value, call[0]);
36551 n_call = 1;
36553 if (toc_load)
36554 call[n_call++] = toc_load;
36555 if (toc_restore)
36556 call[n_call++] = toc_restore;
36558 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
36560 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
36561 insn = emit_call_insn (insn);
36563 /* Mention all registers defined by the ABI to hold information
36564 as uses in CALL_INSN_FUNCTION_USAGE. */
36565 if (abi_reg)
36566 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
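/* Layout sketch of the AIX function descriptor described above
   (an illustrative struct, not a type used by this file):  */

struct aix_func_desc_model
{
  void *entry;          /* word 0: address of the actual code */
  void *toc;            /* word 1: the function's TOC pointer */
  void *static_chain;   /* word 2: value for r11, if a trampoline set it */
};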
36569 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
36571 void
36572 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
36574 rtx call[2];
36575 rtx insn;
36577 gcc_assert (INTVAL (cookie) == 0);
36579 /* Create the call. */
36580 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
36581 if (value != NULL_RTX)
36582 call[0] = gen_rtx_SET (value, call[0]);
36584 call[1] = simple_return_rtx;
36586 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
36587 insn = emit_call_insn (insn);
36589 /* Note use of the TOC register. */
36590 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
36591 /* We need to also mark a use of the link register since the function we
36592 sibling-call to will use it to return to our caller. */
36593 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
36596 /* Return whether we need to always update the saved TOC pointer when we update
36597 the stack pointer. */
36599 static bool
36600 rs6000_save_toc_in_prologue_p (void)
36602 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
36605 #ifdef HAVE_GAS_HIDDEN
36606 # define USE_HIDDEN_LINKONCE 1
36607 #else
36608 # define USE_HIDDEN_LINKONCE 0
36609 #endif
36611 /* Fills in the label name that should be used for a 476 link stack thunk. */
36613 void
36614 get_ppc476_thunk_name (char name[32])
36616 gcc_assert (TARGET_LINK_STACK);
36618 if (USE_HIDDEN_LINKONCE)
36619 sprintf (name, "__ppc476.get_thunk");
36620 else
36621 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
36624 /* This function emits the simple thunk routine that is used to preserve
36625 the link stack on the 476 cpu. */
36627 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
36628 static void
36629 rs6000_code_end (void)
36631 char name[32];
36632 tree decl;
36634 if (!TARGET_LINK_STACK)
36635 return;
36637 get_ppc476_thunk_name (name);
36639 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
36640 build_function_type_list (void_type_node, NULL_TREE));
36641 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
36642 NULL_TREE, void_type_node);
36643 TREE_PUBLIC (decl) = 1;
36644 TREE_STATIC (decl) = 1;
36646 #if RS6000_WEAK
36647 if (USE_HIDDEN_LINKONCE)
36649 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
36650 targetm.asm_out.unique_section (decl, 0);
36651 switch_to_section (get_named_section (decl, NULL, 0));
36652 DECL_WEAK (decl) = 1;
36653 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
36654 targetm.asm_out.globalize_label (asm_out_file, name);
36655 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
36656 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
36658 else
36659 #endif
36661 switch_to_section (text_section);
36662 ASM_OUTPUT_LABEL (asm_out_file, name);
36665 DECL_INITIAL (decl) = make_node (BLOCK);
36666 current_function_decl = decl;
36667 allocate_struct_function (decl, false);
36668 init_function_start (decl);
36669 first_function_block_is_cold = false;
36670 /* Make sure unwind info is emitted for the thunk if needed. */
36671 final_start_function (emit_barrier (), asm_out_file, 1);
36673 fputs ("\tblr\n", asm_out_file);
36675 final_end_function ();
36676 init_insn_lengths ();
36677 free_after_compilation (cfun);
36678 set_cfun (NULL);
36679 current_function_decl = NULL;
36682 /* Add r30 to hard reg set if the prologue sets it up and it is not
36683 pic_offset_table_rtx. */
36685 static void
36686 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
36688 if (!TARGET_SINGLE_PIC_BASE
36689 && TARGET_TOC
36690 && TARGET_MINIMAL_TOC
36691 && get_pool_size () != 0)
36692 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
36693 if (cfun->machine->split_stack_argp_used)
36694 add_to_hard_reg_set (&set->set, Pmode, 12);
36698 /* Helper function for rs6000_split_logical to emit a logical instruction after
36699 splitting the operation into single GPR registers.
36701 DEST is the destination register.
36702 OP1 and OP2 are the input source registers.
36703 CODE is the base operation (AND, IOR, XOR, NOT).
36704 MODE is the machine mode.
36705 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
36706 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
36707 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
36709 static void
36710 rs6000_split_logical_inner (rtx dest,
36711 rtx op1,
36712 rtx op2,
36713 enum rtx_code code,
36714 machine_mode mode,
36715 bool complement_final_p,
36716 bool complement_op1_p,
36717 bool complement_op2_p)
36719 rtx bool_rtx;
36721 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
36722 if (op2 && GET_CODE (op2) == CONST_INT
36723 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
36724 && !complement_final_p && !complement_op1_p && !complement_op2_p)
36726 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
36727 HOST_WIDE_INT value = INTVAL (op2) & mask;
36729 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
36730 if (code == AND)
36732 if (value == 0)
36734 emit_insn (gen_rtx_SET (dest, const0_rtx));
36735 return;
36738 else if (value == mask)
36740 if (!rtx_equal_p (dest, op1))
36741 emit_insn (gen_rtx_SET (dest, op1));
36742 return;
36746 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
36747 into separate ORI/ORIS or XORI/XORIS instructions. */
36748 else if (code == IOR || code == XOR)
36750 if (value == 0)
36752 if (!rtx_equal_p (dest, op1))
36753 emit_insn (gen_rtx_SET (dest, op1));
36754 return;
36759 if (code == AND && mode == SImode
36760 && !complement_final_p && !complement_op1_p && !complement_op2_p)
36762 emit_insn (gen_andsi3 (dest, op1, op2));
36763 return;
36766 if (complement_op1_p)
36767 op1 = gen_rtx_NOT (mode, op1);
36769 if (complement_op2_p)
36770 op2 = gen_rtx_NOT (mode, op2);
36772 /* For canonical RTL, if only one arm is inverted it is the first. */
36773 if (!complement_op1_p && complement_op2_p)
36774 std::swap (op1, op2);
36776 bool_rtx = ((code == NOT)
36777 ? gen_rtx_NOT (mode, op1)
36778 : gen_rtx_fmt_ee (code, mode, op1, op2));
36780 if (complement_final_p)
36781 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
36783 emit_insn (gen_rtx_SET (dest, bool_rtx));
36786 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
36787 operations are split immediately during RTL generation to allow for more
36788 optimizations of the AND/IOR/XOR.
36790 OPERANDS is an array containing the destination and two input operands.
36791 CODE is the base operation (AND, IOR, XOR, NOT).
36792 MODE is the machine mode.
36793 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
36794 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
36795 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
36796 CLOBBER_REG is either NULL or a scratch register of type CC to allow
36797 formation of the AND instructions. */
36799 static void
36800 rs6000_split_logical_di (rtx operands[3],
36801 enum rtx_code code,
36802 bool complement_final_p,
36803 bool complement_op1_p,
36804 bool complement_op2_p)
36806 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
36807 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
36808 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
36809 enum hi_lo { hi = 0, lo = 1 };
36810 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
36811 size_t i;
36813 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
36814 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
36815 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
36816 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
36818 if (code == NOT)
36819 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
36820 else
36822 if (GET_CODE (operands[2]) != CONST_INT)
36824 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
36825 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
36827 else
36829 HOST_WIDE_INT value = INTVAL (operands[2]);
36830 HOST_WIDE_INT value_hi_lo[2];
36832 gcc_assert (!complement_final_p);
36833 gcc_assert (!complement_op1_p);
36834 gcc_assert (!complement_op2_p);
36836 value_hi_lo[hi] = value >> 32;
36837 value_hi_lo[lo] = value & lower_32bits;
36839 for (i = 0; i < 2; i++)
36841 HOST_WIDE_INT sub_value = value_hi_lo[i];
36843 if (sub_value & sign_bit)
36844 sub_value |= upper_32bits;
36846 op2_hi_lo[i] = GEN_INT (sub_value);
36848 /* If this is an AND instruction, check to see if we need to load
36849 the value into a register. */
36850 if (code == AND && sub_value != -1 && sub_value != 0
36851 && !and_operand (op2_hi_lo[i], SImode))
36852 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
36857 for (i = 0; i < 2; i++)
36859 /* Split large IOR/XOR operations. */
36860 if ((code == IOR || code == XOR)
36861 && GET_CODE (op2_hi_lo[i]) == CONST_INT
36862 && !complement_final_p
36863 && !complement_op1_p
36864 && !complement_op2_p
36865 && !logical_const_operand (op2_hi_lo[i], SImode))
36867 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
36868 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
36869 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
36870 rtx tmp = gen_reg_rtx (SImode);
36872 /* Make sure the constant is sign extended. */
36873 if ((hi_16bits & sign_bit) != 0)
36874 hi_16bits |= upper_32bits;
36876 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
36877 code, SImode, false, false, false);
36879 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
36880 code, SImode, false, false, false);
36882 else
36883 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
36884 code, SImode, complement_final_p,
36885 complement_op1_p, complement_op2_p);
36888 return;
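/* For example (schematically, with placeholder register names): on a
   32-bit target, an IOR of a DImode register with the constant 0x12345678
   splits into a simple move for the upper word (IOR with 0) and, because
   0x12345678 is not a 16-bit logical constant, an oris/ori pair for the
   lower word:
       oris  tmp,src_lo,0x1234
       ori   dst_lo,tmp,0x5678  */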
36891 /* Split the insns that make up boolean operations operating on multiple GPR
36892 registers. The boolean MD patterns ensure that the inputs either are
36893 exactly the same as the output registers, or there is no overlap.
36895 OPERANDS is an array containing the destination and two input operands.
36896 CODE is the base operation (AND, IOR, XOR, NOT).
36897 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
36898 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
36899 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
36901 void
36902 rs6000_split_logical (rtx operands[3],
36903 enum rtx_code code,
36904 bool complement_final_p,
36905 bool complement_op1_p,
36906 bool complement_op2_p)
36908 machine_mode mode = GET_MODE (operands[0]);
36909 machine_mode sub_mode;
36910 rtx op0, op1, op2;
36911 int sub_size, regno0, regno1, nregs, i;
36913 /* If this is DImode, use the specialized version that can run before
36914 register allocation. */
36915 if (mode == DImode && !TARGET_POWERPC64)
36917 rs6000_split_logical_di (operands, code, complement_final_p,
36918 complement_op1_p, complement_op2_p);
36919 return;
36922 op0 = operands[0];
36923 op1 = operands[1];
36924 op2 = (code == NOT) ? NULL_RTX : operands[2];
36925 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
36926 sub_size = GET_MODE_SIZE (sub_mode);
36927 regno0 = REGNO (op0);
36928 regno1 = REGNO (op1);
36930 gcc_assert (reload_completed);
36931 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
36932 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
36934 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
36935 gcc_assert (nregs > 1);
36937 if (op2 && REG_P (op2))
36938 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
36940 for (i = 0; i < nregs; i++)
36942 int offset = i * sub_size;
36943 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
36944 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
36945 rtx sub_op2 = ((code == NOT)
36946 ? NULL_RTX
36947 : simplify_subreg (sub_mode, op2, mode, offset));
36949 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
36950 complement_final_p, complement_op1_p,
36951 complement_op2_p);
36954 return;
36958 /* Return true if the peephole2 can combine a load involving a combination of
36959 an addis instruction and a load with an offset that can be fused together on
36960 a power8. */
36962 bool
36963 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
36964 rtx addis_value, /* addis value. */
36965 rtx target, /* target register that is loaded. */
36966 rtx mem) /* bottom part of the memory addr. */
36968 rtx addr;
36969 rtx base_reg;
36971 /* Validate arguments. */
36972 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
36973 return false;
36975 if (!base_reg_operand (target, GET_MODE (target)))
36976 return false;
36978 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
36979 return false;
36981 /* Allow sign/zero extension. */
36982 if (GET_CODE (mem) == ZERO_EXTEND
36983 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
36984 mem = XEXP (mem, 0);
36986 if (!MEM_P (mem))
36987 return false;
36989 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
36990 return false;
36992 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
36993 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
36994 return false;
36996 /* Validate that the register used to load the high value is either the
36997 register being loaded, or we can safely replace its use.
36999 This function is only called from the peephole2 pass and we assume that
37000 there are 2 instructions in the peephole (addis and load), so we want to
37001 check that the target register is not used in the memory address and that
37002 the register holding the addis result is dead after the peephole.
37003 if (REGNO (addis_reg) != REGNO (target))
37005 if (reg_mentioned_p (target, mem))
37006 return false;
37008 if (!peep2_reg_dead_p (2, addis_reg))
37009 return false;
37011 /* If the target register being loaded is the stack pointer, we must
37012 avoid loading any other value into it, even temporarily. */
37013 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37014 return false;
37017 base_reg = XEXP (addr, 0);
37018 return REGNO (addis_reg) == REGNO (base_reg);
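/* Sketch of the shape this peephole matches (register numbers are
   placeholders):
       addis r10,r2,sym@toc@ha
       lwz   r9,sym@toc@l(r10)
   If r10 is dead afterwards and does not appear elsewhere in the address,
   the pair can be fused using the loaded register r9 as the temporary:
       addis r9,r2,sym@toc@ha
       lwz   r9,sym@toc@l(r9)  */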
37021 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37022 sequence. We adjust the addis register to use the target register. If the
37023 load sign extends, we adjust the code to do the zero extending load, and an
37024 explicit sign extension later since the fusion only covers zero extending
37025 loads.
37027 The operands are:
37028 operands[0] register set with addis (to be replaced with target)
37029 operands[1] value set via addis
37030 operands[2] target register being loaded
37031 operands[3] D-form memory reference using operands[0]. */
37033 void
37034 expand_fusion_gpr_load (rtx *operands)
37036 rtx addis_value = operands[1];
37037 rtx target = operands[2];
37038 rtx orig_mem = operands[3];
37039 rtx new_addr, new_mem, orig_addr, offset;
37040 enum rtx_code plus_or_lo_sum;
37041 machine_mode target_mode = GET_MODE (target);
37042 machine_mode extend_mode = target_mode;
37043 machine_mode ptr_mode = Pmode;
37044 enum rtx_code extend = UNKNOWN;
37046 if (GET_CODE (orig_mem) == ZERO_EXTEND
37047 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37049 extend = GET_CODE (orig_mem);
37050 orig_mem = XEXP (orig_mem, 0);
37051 target_mode = GET_MODE (orig_mem);
37054 gcc_assert (MEM_P (orig_mem));
37056 orig_addr = XEXP (orig_mem, 0);
37057 plus_or_lo_sum = GET_CODE (orig_addr);
37058 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37060 offset = XEXP (orig_addr, 1);
37061 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37062 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37064 if (extend != UNKNOWN)
37065 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37067 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37068 UNSPEC_FUSION_GPR);
37069 emit_insn (gen_rtx_SET (target, new_mem));
37071 if (extend == SIGN_EXTEND)
37073 int sub_off = ((BYTES_BIG_ENDIAN)
37074 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37075 : 0);
37076 rtx sign_reg
37077 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37079 emit_insn (gen_rtx_SET (target,
37080 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37083 return;
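/* For instance, a fused signed halfword load is emitted (roughly) as a
   zero-extending lhz through the UNSPEC, followed by a separate sign
   extension of the loaded value (an extsh); the exact insns depend on the
   patterns that match the UNSPEC_FUSION_GPR set.  */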
37086 /* Emit the addis instruction that will be part of a fused instruction
37087 sequence. */
37089 void
37090 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
37091 const char *mode_name)
37093 rtx fuse_ops[10];
37094 char insn_template[80];
37095 const char *addis_str = NULL;
37096 const char *comment_str = ASM_COMMENT_START;
37098 if (*comment_str == ' ')
37099 comment_str++;
37101 /* Emit the addis instruction. */
37102 fuse_ops[0] = target;
37103 if (satisfies_constraint_L (addis_value))
37105 fuse_ops[1] = addis_value;
37106 addis_str = "lis %0,%v1";
37109 else if (GET_CODE (addis_value) == PLUS)
37111 rtx op0 = XEXP (addis_value, 0);
37112 rtx op1 = XEXP (addis_value, 1);
37114 if (REG_P (op0) && CONST_INT_P (op1)
37115 && satisfies_constraint_L (op1))
37117 fuse_ops[1] = op0;
37118 fuse_ops[2] = op1;
37119 addis_str = "addis %0,%1,%v2";
37123 else if (GET_CODE (addis_value) == HIGH)
37125 rtx value = XEXP (addis_value, 0);
37126 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37128 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37129 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37130 if (TARGET_ELF)
37131 addis_str = "addis %0,%2,%1@toc@ha";
37133 else if (TARGET_XCOFF)
37134 addis_str = "addis %0,%1@u(%2)";
37136 else
37137 gcc_unreachable ();
37140 else if (GET_CODE (value) == PLUS)
37142 rtx op0 = XEXP (value, 0);
37143 rtx op1 = XEXP (value, 1);
37145 if (GET_CODE (op0) == UNSPEC
37146 && XINT (op0, 1) == UNSPEC_TOCREL
37147 && CONST_INT_P (op1))
37149 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37150 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37151 fuse_ops[3] = op1;
37152 if (TARGET_ELF)
37153 addis_str = "addis %0,%2,%1+%3@toc@ha";
37155 else if (TARGET_XCOFF)
37156 addis_str = "addis %0,%1+%3@u(%2)";
37158 else
37159 gcc_unreachable ();
37163 else if (satisfies_constraint_L (value))
37165 fuse_ops[1] = value;
37166 addis_str = "lis %0,%v1";
37169 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37171 fuse_ops[1] = value;
37172 addis_str = "lis %0,%1@ha";
37176 if (!addis_str)
37177 fatal_insn ("Could not generate addis value for fusion", addis_value);
37179 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
37180 comment, mode_name);
37181 output_asm_insn (insn_template, fuse_ops);
37184 /* Emit a D-form load or store instruction that is the second instruction
37185 of a fusion sequence. */
37187 void
37188 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37189 const char *insn_str)
37191 rtx fuse_ops[10];
37192 char insn_template[80];
37194 fuse_ops[0] = load_store_reg;
37195 fuse_ops[1] = addis_reg;
37197 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37199 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37200 fuse_ops[2] = offset;
37201 output_asm_insn (insn_template, fuse_ops);
37204 else if (GET_CODE (offset) == UNSPEC
37205 && XINT (offset, 1) == UNSPEC_TOCREL)
37207 if (TARGET_ELF)
37208 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37210 else if (TARGET_XCOFF)
37211 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37213 else
37214 gcc_unreachable ();
37216 fuse_ops[2] = XVECEXP (offset, 0, 0);
37217 output_asm_insn (insn_template, fuse_ops);
37220 else if (GET_CODE (offset) == PLUS
37221 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37222 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37223 && CONST_INT_P (XEXP (offset, 1)))
37225 rtx tocrel_unspec = XEXP (offset, 0);
37226 if (TARGET_ELF)
37227 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37229 else if (TARGET_XCOFF)
37230 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37232 else
37233 gcc_unreachable ();
37235 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37236 fuse_ops[3] = XEXP (offset, 1);
37237 output_asm_insn (insn_template, fuse_ops);
37240 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
37242 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37244 fuse_ops[2] = offset;
37245 output_asm_insn (insn_template, fuse_ops);
37248 else
37249 fatal_insn ("Unable to generate load/store offset for fusion", offset);
37251 return;
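/* To illustrate the templates above: for a small constant offset the
   emitted template is, e.g., "lwz %0,%2(%1)", and for an ELF TOC-relative
   offset it is "lwz %0,%2@toc@l(%1)", with INSN_STR supplying the
   mnemonic.  */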
37254 /* Wrap a TOC address that can be fused to indicate that special fusion
37255 processing is needed. */
37257 static rtx
37258 fusion_wrap_memory_address (rtx old_mem)
37260 rtx old_addr = XEXP (old_mem, 0);
37261 rtvec v = gen_rtvec (1, old_addr);
37262 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
37263 return replace_equiv_address_nv (old_mem, new_addr, false);
37266 /* Given an address, convert it into the addis and load offset parts. Addresses
37267 created during the peephole2 process look like:
37268 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
37269 (unspec [(...)] UNSPEC_TOCREL))
37271 Addresses created via toc fusion look like:
37272 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
37274 static void
37275 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
37277 rtx hi, lo;
37279 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
37281 lo = XVECEXP (addr, 0, 0);
37282 hi = gen_rtx_HIGH (Pmode, lo);
37284 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
37286 hi = XEXP (addr, 0);
37287 lo = XEXP (addr, 1);
37289 else
37290 gcc_unreachable ();
37292 *p_hi = hi;
37293 *p_lo = lo;
37296 /* Return a string to fuse an addis instruction with a GPR load into the same
37297 register that the addis instruction set. The address that is used
37298 is the logical address that was formed during peephole2:
37299 (lo_sum (high) (low-part))
37301 Or the address is the TOC address that is wrapped before register allocation:
37302 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
37304 The code is complicated, so we call output_asm_insn directly, and just
37305 return "". */
37307 const char *
37308 emit_fusion_gpr_load (rtx target, rtx mem)
37310 rtx addis_value;
37311 rtx addr;
37312 rtx load_offset;
37313 const char *load_str = NULL;
37314 const char *mode_name = NULL;
37315 machine_mode mode;
37317 if (GET_CODE (mem) == ZERO_EXTEND)
37318 mem = XEXP (mem, 0);
37320 gcc_assert (REG_P (target) && MEM_P (mem));
37322 addr = XEXP (mem, 0);
37323 fusion_split_address (addr, &addis_value, &load_offset);
37325 /* Now emit the load instruction to the same register. */
37326 mode = GET_MODE (mem);
37327 switch (mode)
37329 case QImode:
37330 mode_name = "char";
37331 load_str = "lbz";
37332 break;
37334 case HImode:
37335 mode_name = "short";
37336 load_str = "lhz";
37337 break;
37339 case SImode:
37340 case SFmode:
37341 mode_name = (mode == SFmode) ? "float" : "int";
37342 load_str = "lwz";
37343 break;
37345 case DImode:
37346 case DFmode:
37347 gcc_assert (TARGET_POWERPC64);
37348 mode_name = (mode == DFmode) ? "double" : "long";
37349 load_str = "ld";
37350 break;
37352 default:
37353 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
37356 /* Emit the addis instruction. */
37357 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
37359 /* Emit the D-form load instruction. */
37360 emit_fusion_load_store (target, target, load_offset, load_str);
37362 return "";
37366 /* Return true if the peephole2 can combine a load/store involving a
37367 combination of an addis instruction and the memory operation. This was
37368 added to the ISA 3.0 (power9) hardware. */
37370 bool
37371 fusion_p9_p (rtx addis_reg, /* register set via addis. */
37372 rtx addis_value, /* addis value. */
37373 rtx dest, /* destination (memory or register). */
37374 rtx src) /* source (register or memory). */
37376 rtx addr, mem, offset;
37377 enum machine_mode mode = GET_MODE (src);
37379 /* Validate arguments. */
37380 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37381 return false;
37383 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37384 return false;
37386 /* Ignore extend operations that are part of the load. */
37387 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
37388 src = XEXP (src, 0);
37390 /* Test for memory<-register or register<-memory. */
37391 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
37393 if (!MEM_P (dest))
37394 return false;
37396 mem = dest;
37399 else if (MEM_P (src))
37401 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
37402 return false;
37404 mem = src;
37407 else
37408 return false;
37410 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37411 if (GET_CODE (addr) == PLUS)
37413 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
37414 return false;
37416 return satisfies_constraint_I (XEXP (addr, 1));
37419 else if (GET_CODE (addr) == LO_SUM)
37421 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
37422 return false;
37424 offset = XEXP (addr, 1);
37425 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
37426 return small_toc_ref (offset, GET_MODE (offset));
37428 else if (TARGET_ELF && !TARGET_POWERPC64)
37429 return CONSTANT_P (offset);
37432 return false;
37435 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
37436 load sequence.
37438 The operands are:
37439 operands[0] register set with addis
37440 operands[1] value set via addis
37441 operands[2] target register being loaded
37442 operands[3] D-form memory reference using operands[0].
37444 This is similar to the fusion introduced with power8, except it extends to
37445 both loads and stores and does not require the result register to be the
37446 same as the base register. At the moment, we only do this if the register
37447 set with addis is dead.
37449 void
37450 expand_fusion_p9_load (rtx *operands)
37452 rtx tmp_reg = operands[0];
37453 rtx addis_value = operands[1];
37454 rtx target = operands[2];
37455 rtx orig_mem = operands[3];
37456 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
37457 enum rtx_code plus_or_lo_sum;
37458 machine_mode target_mode = GET_MODE (target);
37459 machine_mode extend_mode = target_mode;
37460 machine_mode ptr_mode = Pmode;
37461 enum rtx_code extend = UNKNOWN;
37463 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
37465 extend = GET_CODE (orig_mem);
37466 orig_mem = XEXP (orig_mem, 0);
37467 target_mode = GET_MODE (orig_mem);
37470 gcc_assert (MEM_P (orig_mem));
37472 orig_addr = XEXP (orig_mem, 0);
37473 plus_or_lo_sum = GET_CODE (orig_addr);
37474 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37476 offset = XEXP (orig_addr, 1);
37477 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37478 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37480 if (extend != UNKNOWN)
37481 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
37483 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37484 UNSPEC_FUSION_P9);
37486 set = gen_rtx_SET (target, new_mem);
37487 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
37488 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
37489 emit_insn (insn);
37491 return;
37494 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
37495 store sequence.
37497 The operands are:
37498 operands[0] register set with addis
37499 operands[1] value set via addis
37500 operands[2] target D-form memory being stored to
37501 operands[3] register being stored
37503 This is similar to the fusion introduced with power8, except it extends to
37504 both loads and stores and does not require the result register to be the
37505 same as the base register. At the moment, we only do this if the register
37506 set with addis is dead.
37508 void
37509 expand_fusion_p9_store (rtx *operands)
37511 rtx tmp_reg = operands[0];
37512 rtx addis_value = operands[1];
37513 rtx orig_mem = operands[2];
37514 rtx src = operands[3];
37515 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
37516 enum rtx_code plus_or_lo_sum;
37517 machine_mode target_mode = GET_MODE (orig_mem);
37518 machine_mode ptr_mode = Pmode;
37520 gcc_assert (MEM_P (orig_mem));
37522 orig_addr = XEXP (orig_mem, 0);
37523 plus_or_lo_sum = GET_CODE (orig_addr);
37524 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37526 offset = XEXP (orig_addr, 1);
37527 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37528 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37530 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
37531 UNSPEC_FUSION_P9);
37533 set = gen_rtx_SET (new_mem, new_src);
37534 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
37535 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
37536 emit_insn (insn);
37538 return;
37541 /* Return a string to fuse an addis instruction with a load using extended
37542 fusion. The address that is used is the logical address that was formed
37543 during peephole2: (lo_sum (high) (low-part))
37545 The code is complicated, so we call output_asm_insn directly, and just
37546 return "". */
37548 const char *
37549 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
37551 enum machine_mode mode = GET_MODE (reg);
37552 rtx hi;
37553 rtx lo;
37554 rtx addr;
37555 const char *load_string;
37556 int r;
37558 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
37560 mem = XEXP (mem, 0);
37561 mode = GET_MODE (mem);
37564 if (GET_CODE (reg) == SUBREG)
37566 gcc_assert (SUBREG_BYTE (reg) == 0);
37567 reg = SUBREG_REG (reg);
37570 if (!REG_P (reg))
37571 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
37573 r = REGNO (reg);
37574 if (FP_REGNO_P (r))
37576 if (mode == SFmode)
37577 load_string = "lfs";
37578 else if (mode == DFmode || mode == DImode)
37579 load_string = "lfd";
37580 else
37581 gcc_unreachable ();
37583 else if (INT_REGNO_P (r))
37585 switch (mode)
37587 case QImode:
37588 load_string = "lbz";
37589 break;
37590 case HImode:
37591 load_string = "lhz";
37592 break;
37593 case SImode:
37594 case SFmode:
37595 load_string = "lwz";
37596 break;
37597 case DImode:
37598 case DFmode:
37599 if (!TARGET_POWERPC64)
37600 gcc_unreachable ();
37601 load_string = "ld";
37602 break;
37603 default:
37604 gcc_unreachable ();
37607 else
37608 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
37610 if (!MEM_P (mem))
37611 fatal_insn ("emit_fusion_p9_load not MEM", mem);
37613 addr = XEXP (mem, 0);
37614 fusion_split_address (addr, &hi, &lo);
37616 /* Emit the addis instruction. */
37617 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
37619 /* Emit the D-form load instruction. */
37620 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
37622 return "";
37625 /* Return a string to fuse an addis instruction with a store using extended
37626 fusion. The address that is used is the logical address that was formed
37627 during peephole2: (lo_sum (high) (low-part))
37629 The code is complicated, so we call output_asm_insn directly, and just
37630 return "". */
37632 const char *
37633 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
37635 enum machine_mode mode = GET_MODE (reg);
37636 rtx hi;
37637 rtx lo;
37638 rtx addr;
37639 const char *store_string;
37640 int r;
37642 if (GET_CODE (reg) == SUBREG)
37644 gcc_assert (SUBREG_BYTE (reg) == 0);
37645 reg = SUBREG_REG (reg);
37648 if (!REG_P (reg))
37649 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
37651 r = REGNO (reg);
37652 if (FP_REGNO_P (r))
37654 if (mode == SFmode)
37655 store_string = "stfs";
37656 else if (mode == DFmode)
37657 store_string = "stfd";
37658 else
37659 gcc_unreachable ();
37661 else if (INT_REGNO_P (r))
37663 switch (mode)
37665 case QImode:
37666 store_string = "stb";
37667 break;
37668 case HImode:
37669 store_string = "sth";
37670 break;
37671 case SImode:
37672 case SFmode:
37673 store_string = "stw";
37674 break;
37675 case DImode:
37676 case DFmode:
37677 if (!TARGET_POWERPC64)
37678 gcc_unreachable ();
37679 store_string = "std";
37680 break;
37681 default:
37682 gcc_unreachable ();
37685 else
37686 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
37688 if (!MEM_P (mem))
37689 fatal_insn ("emit_fusion_p9_store not MEM", mem);
37691 addr = XEXP (mem, 0);
37692 fusion_split_address (addr, &hi, &lo);
37694 /* Emit the addis instruction. */
37695 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
37697 /* Emit the D-form store instruction. */
37698 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
37700 return "";
37704 /* Analyze vector computations and remove unnecessary doubleword
37705 swaps (xxswapdi instructions). This pass is performed only
37706 for little-endian VSX code generation.
37708 For this specific case, loads and stores of 4x32 and 2x64 vectors
37709 are inefficient. These are implemented using the lvxd2x and
37710 stvxd2x instructions, which invert the order of doublewords in
37711 a vector register. Thus the code generation inserts an xxswapdi
37712 after each such load, and prior to each such store. (For spill
37713 code after register assignment, an additional xxswapdi is inserted
37714 following each store in order to return a hard register to its
37715 unpermuted value.)
37717 The extra xxswapdi instructions reduce performance. This can be
37718 particularly bad for vectorized code. The purpose of this pass
37719 is to reduce the number of xxswapdi instructions required for
37720 correctness.
37722 The primary insight is that much code that operates on vectors
37723 does not care about the relative order of elements in a register,
37724 so long as the correct memory order is preserved. If we have
37725 a computation where all input values are provided by lvxd2x/xxswapdi
37726 sequences, all outputs are stored using xxswapdi/stvxd2x sequences,
37727 and all intermediate computations are pure SIMD (independent of
37728 element order), then all the xxswapdi's associated with the loads
37729 and stores may be removed.
37731 This pass uses some of the infrastructure and logical ideas from
37732 the "web" pass in web.c. We create maximal webs of computations
37733 fitting the description above using union-find. Each such web is
37734 then optimized by removing its unnecessary xxswapdi instructions.
37736 The pass is placed prior to global optimization so that we can
37737 perform the optimization in the safest and simplest way possible;
37738 that is, by replacing each xxswapdi insn with a register copy insn.
37739 Subsequent forward propagation will remove copies where possible.
37741 There are some operations sensitive to element order for which we
37742 can still allow the operation, provided we modify those operations.
37743 These include CONST_VECTORs, for which we must swap the first and
37744 second halves of the constant vector; and SUBREGs, for which we
37745 must adjust the byte offset to account for the swapped doublewords.
37746 A remaining opportunity would be non-immediate-form splats, for
37747 which we should adjust the selected lane of the input. We should
37748 also make code generation adjustments for sum-across operations,
37749 since this is a common vectorizer reduction.
37751 Because we run prior to the first split, we can see loads and stores
37752 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
37753 vector loads and stores that have not yet been split into a permuting
37754 load/store and a swap. (One way this can happen is with a builtin
37755 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
37756 than deleting a swap, we convert the load/store into a permuting
37757 load/store (which effectively removes the swap). */
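/* As a schematic example of what this pass enables for little-endian code
   (instruction operands elided):
       lvxd2x; xxswapdi; xvadddp; xxswapdi; stvxd2x
   becomes
       lvxd2x; xvadddp; stvxd2x
   because xvadddp operates lane-wise and is indifferent to doubleword
   order.  (The swaps are first turned into register copies, which later
   passes clean up.)  */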
37759 /* Notes on Permutes
37761 We do not currently handle computations that contain permutes. There
37762 is a general transformation that can be performed correctly, but it
37763 may introduce more expensive code than it replaces. To handle these
37764 would require a cost model to determine when to perform the optimization.
37765 This commentary records how this could be done if desired.
37767 The most general permute is something like this (example for V16QI):
37769 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
37770 (parallel [(const_int a0) (const_int a1)
37771 ...
37772 (const_int a14) (const_int a15)]))
37774 where a0,...,a15 are in [0,31] and select elements from op1 and op2
37775 to produce the result.
37777 Regardless of mode, we can convert the PARALLEL to a mask of 16
37778 byte-element selectors. Let's call this M, with M[i] representing
37779 the ith byte-element selector value. Then if we swap doublewords
37780 throughout the computation, we can get correct behavior by replacing
37781 M with M' as follows:
37783 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
37784 { ((M[i]+8)%16)+16 : M[i] in [16,31]
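   For example, applying the formula: a selector value M[i] = 3 (byte 3
   of op1) becomes M'[i] = (3+8)%16 = 11, and M[i] = 19 (byte 3 of op2)
   becomes M'[i] = ((19+8)%16)+16 = 27.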
37786 This seems promising at first, since we are just replacing one mask
37787 with another. But certain masks are preferable to others. If M
37788 is a mask that matches a vmrghh pattern, for example, M' certainly
37789 will not. Instead of a single vmrghh, we would generate a load of
37790 M' and a vperm. So we would need to know how many xxswapd's we can
37791 remove as a result of this transformation to determine if it's
37792 profitable; ideally the logic would also be aware of all
37793 the special preferable masks.
37795 Another form of permute is an UNSPEC_VPERM, in which the mask is
37796 already in a register. In some cases, this mask may be a constant
37797 that we can discover with ud-chains, in which case the above
37798 transformation is ok. However, the common usage here is for the
37799 mask to be produced by an UNSPEC_LVSL, in which case the mask
37800 cannot be known at compile time. In such a case we would have to
37801 generate several instructions to compute M' as above at run time,
37802 and a cost model is needed again.
37804 However, when the mask M for an UNSPEC_VPERM is loaded from the
37805 constant pool, we can replace M with M' as above at no cost
37806 beyond adding a constant pool entry. */
37808 /* This is based on the union-find logic in web.c. web_entry_base is
37809 defined in df.h. */
37810 class swap_web_entry : public web_entry_base
37812 public:
37813 /* Pointer to the insn. */
37814 rtx_insn *insn;
37815 /* Set if insn contains a mention of a vector register. All other
37816 fields are undefined if this field is unset. */
37817 unsigned int is_relevant : 1;
37818 /* Set if insn is a load. */
37819 unsigned int is_load : 1;
37820 /* Set if insn is a store. */
37821 unsigned int is_store : 1;
37822 /* Set if insn is a doubleword swap. This can either be a register swap
37823 or a permuting load or store (test is_load and is_store for this). */
37824 unsigned int is_swap : 1;
37825 /* Set if the insn has a live-in use of a parameter register. */
37826 unsigned int is_live_in : 1;
37827 /* Set if the insn has a live-out def of a return register. */
37828 unsigned int is_live_out : 1;
37829 /* Set if the insn contains a subreg reference of a vector register. */
37830 unsigned int contains_subreg : 1;
37831 /* Set if the insn contains a 128-bit integer operand. */
37832 unsigned int is_128_int : 1;
37833 /* Set if this is a call-insn. */
37834 unsigned int is_call : 1;
37835 /* Set if this insn does not perform a vector operation for which
37836 element order matters, or if we know how to fix it up if it does.
37837 Undefined if is_swap is set. */
37838 unsigned int is_swappable : 1;
37839 /* A nonzero value indicates what kind of special handling for this
37840 insn is required if doublewords are swapped. Undefined if
37841 is_swappable is not set. */
37842 unsigned int special_handling : 4;
37843 /* Set if the web represented by this entry cannot be optimized. */
37844 unsigned int web_not_optimizable : 1;
37845 /* Set if this insn should be deleted. */
37846 unsigned int will_delete : 1;
37849 enum special_handling_values {
37850 SH_NONE = 0,
37851 SH_CONST_VECTOR,
37852 SH_SUBREG,
37853 SH_NOSWAP_LD,
37854 SH_NOSWAP_ST,
37855 SH_EXTRACT,
37856 SH_SPLAT,
37857 SH_XXPERMDI,
37858 SH_CONCAT,
37859 SH_VPERM
37862 /* Union INSN with all insns containing definitions that reach USE.
37863 Detect whether USE is live-in to the current function. */
37864 static void
37865 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
37867 struct df_link *link = DF_REF_CHAIN (use);
37869 if (!link)
37870 insn_entry[INSN_UID (insn)].is_live_in = 1;
37872 while (link)
37874 if (DF_REF_IS_ARTIFICIAL (link->ref))
37875 insn_entry[INSN_UID (insn)].is_live_in = 1;
37877 if (DF_REF_INSN_INFO (link->ref))
37879 rtx def_insn = DF_REF_INSN (link->ref);
37880 (void)unionfind_union (insn_entry + INSN_UID (insn),
37881 insn_entry + INSN_UID (def_insn));
37884 link = link->next;
37888 /* Union INSN with all insns containing uses reached from DEF.
37889 Detect whether DEF is live-out from the current function. */
37890 static void
37891 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
37893 struct df_link *link = DF_REF_CHAIN (def);
37895 if (!link)
37896 insn_entry[INSN_UID (insn)].is_live_out = 1;
37898 while (link)
37900 /* This could be an eh use or some other artificial use;
37901 we treat these all the same (killing the optimization). */
37902 if (DF_REF_IS_ARTIFICIAL (link->ref))
37903 insn_entry[INSN_UID (insn)].is_live_out = 1;
37905 if (DF_REF_INSN_INFO (link->ref))
37907 rtx use_insn = DF_REF_INSN (link->ref);
37908 (void)unionfind_union (insn_entry + INSN_UID (insn),
37909 insn_entry + INSN_UID (use_insn));
37912 link = link->next;
37916 /* Return 1 iff INSN is a load insn, including permuting loads that
37917 represent an lvxd2x instruction; else return 0. */
37918 static unsigned int
37919 insn_is_load_p (rtx insn)
37921 rtx body = PATTERN (insn);
37923 if (GET_CODE (body) == SET)
37925 if (GET_CODE (SET_SRC (body)) == MEM)
37926 return 1;
37928 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
37929 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
37930 return 1;
37932 return 0;
37935 if (GET_CODE (body) != PARALLEL)
37936 return 0;
37938 rtx set = XVECEXP (body, 0, 0);
37940 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
37941 return 1;
37943 return 0;
37946 /* Return 1 iff INSN is a store insn, including permuting stores that
37947 represent an stvxd2x instruction; else return 0. */
37948 static unsigned int
37949 insn_is_store_p (rtx insn)
37951 rtx body = PATTERN (insn);
37952 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
37953 return 1;
37954 if (GET_CODE (body) != PARALLEL)
37955 return 0;
37956 rtx set = XVECEXP (body, 0, 0);
37957 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
37958 return 1;
37959 return 0;
37962 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
37963 a permuting load, or a permuting store. */
37964 static unsigned int
37965 insn_is_swap_p (rtx insn)
37967 rtx body = PATTERN (insn);
37968 if (GET_CODE (body) != SET)
37969 return 0;
37970 rtx rhs = SET_SRC (body);
37971 if (GET_CODE (rhs) != VEC_SELECT)
37972 return 0;
37973 rtx parallel = XEXP (rhs, 1);
37974 if (GET_CODE (parallel) != PARALLEL)
37975 return 0;
37976 unsigned int len = XVECLEN (parallel, 0);
37977 if (len != 2 && len != 4 && len != 8 && len != 16)
37978 return 0;
37979 for (unsigned int i = 0; i < len / 2; ++i)
37981 rtx op = XVECEXP (parallel, 0, i);
37982 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
37983 return 0;
37985 for (unsigned int i = len / 2; i < len; ++i)
37987 rtx op = XVECEXP (parallel, 0, i);
37988 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
37989 return 0;
37991 return 1;
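/* For reference, the selector shape accepted above (element values shown)
   is (parallel [1 0]) for V2DF/V2DI, (parallel [2 3 0 1]) for V4SI/V4SF,
   and so on up to (parallel [8 ... 15 0 ... 7]) for V16QI.  */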
37994 /* Return TRUE if insn is a swap fed by a load from the constant pool. */
37995 static bool
37996 const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
37998 unsigned uid = INSN_UID (insn);
37999 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
38000 return false;
38002 /* Find the unique use in the swap and locate its def. If the def
38003 isn't unique, punt. */
38004 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38005 df_ref use;
38006 FOR_EACH_INSN_INFO_USE (use, insn_info)
38008 struct df_link *def_link = DF_REF_CHAIN (use);
38009 if (!def_link || def_link->next)
38010 return false;
38012 rtx def_insn = DF_REF_INSN (def_link->ref);
38013 unsigned uid2 = INSN_UID (def_insn);
38014 if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
38015 return false;
38017 rtx body = PATTERN (def_insn);
38018 if (GET_CODE (body) != SET
38019 || GET_CODE (SET_SRC (body)) != VEC_SELECT
38020 || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
38021 return false;
38023 rtx mem = XEXP (SET_SRC (body), 0);
38024 rtx base_reg = XEXP (mem, 0);
38026 df_ref base_use;
38027 insn_info = DF_INSN_INFO_GET (def_insn);
38028 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
38030 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
38031 continue;
38033 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
38034 if (!base_def_link || base_def_link->next)
38035 return false;
38037 rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
38038 rtx tocrel_body = PATTERN (tocrel_insn);
38039 rtx base, offset;
38040 if (GET_CODE (tocrel_body) != SET)
38041 return false;
38042 /* There is an extra level of indirection for small/large
38043 code models. */
38044 rtx tocrel_expr = SET_SRC (tocrel_body);
38045 if (GET_CODE (tocrel_expr) == MEM)
38046 tocrel_expr = XEXP (tocrel_expr, 0);
38047 if (!toc_relative_expr_p (tocrel_expr, false))
38048 return false;
38049 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
38050 if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
38051 return false;
38054 return true;
38057 /* Return TRUE iff OP matches a V2DF reduction pattern. See the
38058 definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md. */
38059 static bool
38060 v2df_reduction_p (rtx op)
38062 if (GET_MODE (op) != V2DFmode)
38063 return false;
38065 enum rtx_code code = GET_CODE (op);
38066 if (code != PLUS && code != SMIN && code != SMAX)
38067 return false;
38069 rtx concat = XEXP (op, 0);
38070 if (GET_CODE (concat) != VEC_CONCAT)
38071 return false;
38073 rtx select0 = XEXP (concat, 0);
38074 rtx select1 = XEXP (concat, 1);
38075 if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
38076 return false;
38078 rtx reg0 = XEXP (select0, 0);
38079 rtx reg1 = XEXP (select1, 0);
38080 if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
38081 return false;
38083 rtx parallel0 = XEXP (select0, 1);
38084 rtx parallel1 = XEXP (select1, 1);
38085 if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
38086 return false;
38088 if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
38089 || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
38090 return false;
38092 return true;
38095 /* Return 1 iff OP is an operand that will not be affected by having
38096 vector doublewords swapped in memory. */
38097 static unsigned int
38098 rtx_is_swappable_p (rtx op, unsigned int *special)
38100 enum rtx_code code = GET_CODE (op);
38101 int i, j;
38102 rtx parallel;
38104 switch (code)
38106 case LABEL_REF:
38107 case SYMBOL_REF:
38108 case CLOBBER:
38109 case REG:
38110 return 1;
38112 case VEC_CONCAT:
38113 case ASM_INPUT:
38114 case ASM_OPERANDS:
38115 return 0;
38117 case CONST_VECTOR:
38119 *special = SH_CONST_VECTOR;
38120 return 1;
38123 case VEC_DUPLICATE:
38124 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
38125 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
38126 it represents a vector splat for which we can do special
38127 handling. */
38128 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
38129 return 1;
38130 else if (GET_CODE (XEXP (op, 0)) == REG
38131 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
38132 /* This catches V2DF and V2DI splat, at a minimum. */
38133 return 1;
38134 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
38135 /* If the duplicated item is from a select, defer to the select
38136 processing to see if we can change the lane for the splat. */
38137 return rtx_is_swappable_p (XEXP (op, 0), special);
38138 else
38139 return 0;
38141 case VEC_SELECT:
38142 /* A vec_extract operation is ok if we change the lane. */
38143 if (GET_CODE (XEXP (op, 0)) == REG
38144 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
38145 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
38146 && XVECLEN (parallel, 0) == 1
38147 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
38149 *special = SH_EXTRACT;
38150 return 1;
38152 /* An XXPERMDI is ok if we adjust the lanes. Note that if the
38153 XXPERMDI is a swap operation, it will be identified by
38154 insn_is_swap_p and therefore we won't get here. */
38155 else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
38156 && (GET_MODE (XEXP (op, 0)) == V4DFmode
38157 || GET_MODE (XEXP (op, 0)) == V4DImode)
38158 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
38159 && XVECLEN (parallel, 0) == 2
38160 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
38161 && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
38163 *special = SH_XXPERMDI;
38164 return 1;
38166 else if (v2df_reduction_p (op))
38167 return 1;
38168 else
38169 return 0;
38171 case UNSPEC:
38173 /* Various operations are unsafe for this optimization, at least
38174 without significant additional work. Permutes are obviously
38175 problematic, as both the permute control vector and the ordering
38176 of the target values are invalidated by doubleword swapping.
38177 Vector pack and unpack modify the number of vector lanes.
38178 Merge-high/low will not operate correctly on swapped operands.
38179 Vector shifts across element boundaries are clearly uncool,
38180 as are vector select and concatenate operations. Vector
38181 sum-across instructions define one operand with a specific
38182 order-dependent element, so additional fixup code would be
38183 needed to make those work. Vector set and non-immediate-form
38184 vector splat are element-order sensitive. A few of these
38185 cases might be workable with special handling if required.
38186 Adding cost modeling would be appropriate in some cases. */
38187 int val = XINT (op, 1);
38188 switch (val)
38190 default:
38191 break;
38192 case UNSPEC_VMRGH_DIRECT:
38193 case UNSPEC_VMRGL_DIRECT:
38194 case UNSPEC_VPACK_SIGN_SIGN_SAT:
38195 case UNSPEC_VPACK_SIGN_UNS_SAT:
38196 case UNSPEC_VPACK_UNS_UNS_MOD:
38197 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
38198 case UNSPEC_VPACK_UNS_UNS_SAT:
38199 case UNSPEC_VPERM:
38200 case UNSPEC_VPERM_UNS:
38201 case UNSPEC_VPERMHI:
38202 case UNSPEC_VPERMSI:
38203 case UNSPEC_VPKPX:
38204 case UNSPEC_VSLDOI:
38205 case UNSPEC_VSLO:
38206 case UNSPEC_VSRO:
38207 case UNSPEC_VSUM2SWS:
38208 case UNSPEC_VSUM4S:
38209 case UNSPEC_VSUM4UBS:
38210 case UNSPEC_VSUMSWS:
38211 case UNSPEC_VSUMSWS_DIRECT:
38212 case UNSPEC_VSX_CONCAT:
38213 case UNSPEC_VSX_SET:
38214 case UNSPEC_VSX_SLDWI:
38215 case UNSPEC_VUNPACK_HI_SIGN:
38216 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
38217 case UNSPEC_VUNPACK_LO_SIGN:
38218 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
38219 case UNSPEC_VUPKHPX:
38220 case UNSPEC_VUPKHS_V4SF:
38221 case UNSPEC_VUPKHU_V4SF:
38222 case UNSPEC_VUPKLPX:
38223 case UNSPEC_VUPKLS_V4SF:
38224 case UNSPEC_VUPKLU_V4SF:
38225 case UNSPEC_VSX_CVDPSPN:
38226 case UNSPEC_VSX_CVSPDP:
38227 case UNSPEC_VSX_CVSPDPN:
38228 return 0;
38229 case UNSPEC_VSPLT_DIRECT:
38230 *special = SH_SPLAT;
38231 return 1;
38232 case UNSPEC_REDUC_PLUS:
38233 case UNSPEC_REDUC:
38234 return 1;
38238 default:
38239 break;
38242 const char *fmt = GET_RTX_FORMAT (code);
38243 int ok = 1;
38245 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38246 if (fmt[i] == 'e' || fmt[i] == 'u')
38248 unsigned int special_op = SH_NONE;
38249 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
38250 if (special_op == SH_NONE)
38251 continue;
38252 /* Ensure we never have two kinds of special handling
38253 for the same insn. */
38254 if (*special != SH_NONE && *special != special_op)
38255 return 0;
38256 *special = special_op;
38258 else if (fmt[i] == 'E')
38259 for (j = 0; j < XVECLEN (op, i); ++j)
38261 unsigned int special_op = SH_NONE;
38262 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
38263 if (special_op == SH_NONE)
38264 continue;
38265 /* Ensure we never have two kinds of special handling
38266 for the same insn. */
38267 if (*special != SH_NONE && *special != special_op)
38268 return 0;
38269 *special = special_op;
38272 return ok;
38275 /* Return 1 iff INSN is an insn that will not be affected by
38276 having vector doublewords swapped in memory (in which case
38277 *SPECIAL is unchanged), or that can be modified to be correct
38278 if vector doublewords are swapped in memory (in which case
38279 *SPECIAL is changed to a value indicating how). */
38280 static unsigned int
38281 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
38282 unsigned int *special)
38284 /* Calls are always bad. */
38285 if (GET_CODE (insn) == CALL_INSN)
38286 return 0;
38288 /* Loads and stores seen here are not permuting, but we can still
38289 fix them up by converting them to permuting ones. Exceptions:
38290 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
38291 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
38292 for the SET source. Also we must now make an exception for lvx
38293 and stvx when they are not in the UNSPEC_LVX/STVX form (with the
38294 explicit "& -16") since this leads to unrecognizable insns. */
38295 rtx body = PATTERN (insn);
38296 int i = INSN_UID (insn);
38298 if (insn_entry[i].is_load)
38300 if (GET_CODE (body) == SET)
38302 rtx rhs = SET_SRC (body);
38303 gcc_assert (GET_CODE (rhs) == MEM);
38304 if (GET_CODE (XEXP (rhs, 0)) == AND)
38305 return 0;
38307 *special = SH_NOSWAP_LD;
38308 return 1;
38310 else
38311 return 0;
38314 if (insn_entry[i].is_store)
38316 if (GET_CODE (body) == SET
38317 && GET_CODE (SET_SRC (body)) != UNSPEC)
38319 rtx lhs = SET_DEST (body);
38320 gcc_assert (GET_CODE (lhs) == MEM);
38321 if (GET_CODE (XEXP (lhs, 0)) == AND)
38322 return 0;
38324 *special = SH_NOSWAP_ST;
38325 return 1;
38327 else
38328 return 0;
38331 /* A convert to single precision can be left as is provided that
38332 all of its uses are in xxspltw instructions that splat BE element
38333 zero. */
38334 if (GET_CODE (body) == SET
38335 && GET_CODE (SET_SRC (body)) == UNSPEC
38336 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
38338 df_ref def;
38339 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38341 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38343 struct df_link *link = DF_REF_CHAIN (def);
38344 if (!link)
38345 return 0;
38347 for (; link; link = link->next) {
38348 rtx use_insn = DF_REF_INSN (link->ref);
38349 rtx use_body = PATTERN (use_insn);
38350 if (GET_CODE (use_body) != SET
38351 || GET_CODE (SET_SRC (use_body)) != UNSPEC
38352 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
38353 || XEXP (XEXP (SET_SRC (use_body), 0), 1) != const0_rtx)
38354 return 0;
38358 return 1;
38361 /* A concatenation of two doublewords is ok if we reverse the
38362 order of the inputs. */
38363 if (GET_CODE (body) == SET
38364 && GET_CODE (SET_SRC (body)) == VEC_CONCAT
38365 && (GET_MODE (SET_SRC (body)) == V2DFmode
38366 || GET_MODE (SET_SRC (body)) == V2DImode))
38368 *special = SH_CONCAT;
38369 return 1;
38372 /* V2DF reductions are always swappable. */
38373 if (GET_CODE (body) == PARALLEL)
38375 rtx expr = XVECEXP (body, 0, 0);
38376 if (GET_CODE (expr) == SET
38377 && v2df_reduction_p (SET_SRC (expr)))
38378 return 1;
38381 /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
38382 constant pool. */
38383 if (GET_CODE (body) == SET
38384 && GET_CODE (SET_SRC (body)) == UNSPEC
38385 && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
38386 && XVECLEN (SET_SRC (body), 0) == 3
38387 && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
38389 rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
38390 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38391 df_ref use;
38392 FOR_EACH_INSN_INFO_USE (use, insn_info)
38393 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
38395 struct df_link *def_link = DF_REF_CHAIN (use);
38396 /* Punt if multiple definitions for this reg. */
38397 if (def_link && !def_link->next &&
38398 const_load_sequence_p (insn_entry,
38399 DF_REF_INSN (def_link->ref)))
38401 *special = SH_VPERM;
38402 return 1;
38407 /* Otherwise check the operands for vector lane violations. */
38408 return rtx_is_swappable_p (body, special);
38411 enum chain_purpose { FOR_LOADS, FOR_STORES };
38413 /* Return true if the UD or DU chain headed by LINK is non-empty,
38414 and every entry on the chain references an insn that is a
38415 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
38416 register swap must have only permuting loads as reaching defs.
38417 If PURPOSE is FOR_STORES, each such register swap must have only
38418 register swaps or permuting stores as reached uses. */
38419 static bool
38420 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
38421 enum chain_purpose purpose)
38423 if (!link)
38424 return false;
38426 for (; link; link = link->next)
38428 if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
38429 continue;
38431 if (DF_REF_IS_ARTIFICIAL (link->ref))
38432 return false;
38434 rtx reached_insn = DF_REF_INSN (link->ref);
38435 unsigned uid = INSN_UID (reached_insn);
38436 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
38438 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
38439 || insn_entry[uid].is_store)
38440 return false;
38442 if (purpose == FOR_LOADS)
38444 df_ref use;
38445 FOR_EACH_INSN_INFO_USE (use, insn_info)
38447 struct df_link *swap_link = DF_REF_CHAIN (use);
38449 while (swap_link)
38451 if (DF_REF_IS_ARTIFICIAL (link->ref))
38452 return false;
38454 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
38455 unsigned uid2 = INSN_UID (swap_def_insn);
38457 /* Only permuting loads are allowed. */
38458 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
38459 return false;
38461 swap_link = swap_link->next;
38465 else if (purpose == FOR_STORES)
38467 df_ref def;
38468 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38470 struct df_link *swap_link = DF_REF_CHAIN (def);
38472 while (swap_link)
38474 if (DF_REF_IS_ARTIFICIAL (link->ref))
38475 return false;
38477 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
38478 unsigned uid2 = INSN_UID (swap_use_insn);
38480 /* Permuting stores or register swaps are allowed. */
38481 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
38482 return false;
38484 swap_link = swap_link->next;
38490 return true;
38493 /* Mark the xxswapdi instructions associated with permuting loads and
38494 stores for removal. Note that we only flag them for deletion here,
38495 as there is a possibility of a swap being reached from multiple
38496 loads, etc. */
38497 static void
38498 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
38500 rtx insn = insn_entry[i].insn;
38501 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38503 if (insn_entry[i].is_load)
38505 df_ref def;
38506 FOR_EACH_INSN_INFO_DEF (def, insn_info)
38508 struct df_link *link = DF_REF_CHAIN (def);
38510 /* We know by now that these are swaps, so we can delete
38511 them confidently. */
38512 while (link)
38514 rtx use_insn = DF_REF_INSN (link->ref);
38515 insn_entry[INSN_UID (use_insn)].will_delete = 1;
38516 link = link->next;
38520 else if (insn_entry[i].is_store)
38522 df_ref use;
38523 FOR_EACH_INSN_INFO_USE (use, insn_info)
38525 /* Ignore uses for addressability. */
38526 machine_mode mode = GET_MODE (DF_REF_REG (use));
38527 if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
38528 continue;
38530 struct df_link *link = DF_REF_CHAIN (use);
38532 /* We know by now that these are swaps, so we can delete
38533 them confidently. */
38534 while (link)
38536 rtx def_insn = DF_REF_INSN (link->ref);
38537 insn_entry[INSN_UID (def_insn)].will_delete = 1;
38538 link = link->next;
38544 /* OP is either a CONST_VECTOR or an expression containing one.
38545 Swap the first half of the vector with the second in the first
38546 case. Recurse to find it in the second. */
38547 static void
38548 swap_const_vector_halves (rtx op)
38550 int i;
38551 enum rtx_code code = GET_CODE (op);
38552 if (GET_CODE (op) == CONST_VECTOR)
38554 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
38555 for (i = 0; i < half_units; ++i)
38557 rtx temp = CONST_VECTOR_ELT (op, i);
38558 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
38559 CONST_VECTOR_ELT (op, i + half_units) = temp;
38562 else
38564 int j;
38565 const char *fmt = GET_RTX_FORMAT (code);
38566 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38567 if (fmt[i] == 'e' || fmt[i] == 'u')
38568 swap_const_vector_halves (XEXP (op, i));
38569 else if (fmt[i] == 'E')
38570 for (j = 0; j < XVECLEN (op, i); ++j)
38571 swap_const_vector_halves (XVECEXP (op, i, j));
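/* E.g., a V4SI constant vector {0, 1, 2, 3} becomes {2, 3, 0, 1}:
   elements 0 and 1 are exchanged with elements 2 and 3.  */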
38575 /* Find all subregs of a vector expression that perform a narrowing,
38576 and adjust the subreg index to account for doubleword swapping. */
38577 static void
38578 adjust_subreg_index (rtx op)
38580 enum rtx_code code = GET_CODE (op);
38581 if (code == SUBREG
38582 && (GET_MODE_SIZE (GET_MODE (op))
38583 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
38585 unsigned int index = SUBREG_BYTE (op);
38586 if (index < 8)
38587 index += 8;
38588 else
38589 index -= 8;
38590 SUBREG_BYTE (op) = index;
38593 const char *fmt = GET_RTX_FORMAT (code);
38594 int i, j;
38595 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
38596 if (fmt[i] == 'e' || fmt[i] == 'u')
38597 adjust_subreg_index (XEXP (op, i));
38598 else if (fmt[i] == 'E')
38599 for (j = 0; j < XVECLEN (op, i); ++j)
38600 adjust_subreg_index (XVECEXP (op, i, j));
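/* E.g., a narrowing DImode subreg of a V2DI register at SUBREG_BYTE 0 is
   rewritten to SUBREG_BYTE 8, and vice versa, since the doubleword it
   names has moved to the other half of the register.  */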
38603 /* Convert the non-permuting load INSN to a permuting one. */
38604 static void
38605 permute_load (rtx_insn *insn)
38607 rtx body = PATTERN (insn);
38608 rtx mem_op = SET_SRC (body);
38609 rtx tgt_reg = SET_DEST (body);
38610 machine_mode mode = GET_MODE (tgt_reg);
38611 int n_elts = GET_MODE_NUNITS (mode);
38612 int half_elts = n_elts / 2;
38613 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
38614 int i, j;
38615 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
38616 XVECEXP (par, 0, i) = GEN_INT (j);
38617 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
38618 XVECEXP (par, 0, i) = GEN_INT (j);
38619 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
38620 SET_SRC (body) = sel;
38621 INSN_CODE (insn) = -1; /* Force re-recognition. */
38622 df_insn_rescan (insn);
38624 if (dump_file)
38625 fprintf (dump_file, "Replacing load %d with permuted load\n",
38626 INSN_UID (insn));
38629 /* Convert the non-permuting store INSN to a permuting one. */
38630 static void
38631 permute_store (rtx_insn *insn)
38633 rtx body = PATTERN (insn);
38634 rtx src_reg = SET_SRC (body);
38635 machine_mode mode = GET_MODE (src_reg);
38636 int n_elts = GET_MODE_NUNITS (mode);
38637 int half_elts = n_elts / 2;
38638 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
38639 int i, j;
38640 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
38641 XVECEXP (par, 0, i) = GEN_INT (j);
38642 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
38643 XVECEXP (par, 0, i) = GEN_INT (j);
38644 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
38645 SET_SRC (body) = sel;
38646 INSN_CODE (insn) = -1; /* Force re-recognition. */
38647 df_insn_rescan (insn);
38649 if (dump_file)
38650 fprintf (dump_file, "Replacing store %d with permuted store\n",
38651 INSN_UID (insn));
38654 /* Given INSN that contains a vector extract operation, adjust the index
38655 of the extracted lane to account for the doubleword swap. */
38656 static void
38657 adjust_extract (rtx_insn *insn)
38659 rtx pattern = PATTERN (insn);
38660 if (GET_CODE (pattern) == PARALLEL)
38661 pattern = XVECEXP (pattern, 0, 0);
38662 rtx src = SET_SRC (pattern);
38663 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
38664 account for that. */
38665 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
38666 rtx par = XEXP (sel, 1);
38667 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
38668 int lane = INTVAL (XVECEXP (par, 0, 0));
38669 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
38670 XVECEXP (par, 0, 0) = GEN_INT (lane);
38671 INSN_CODE (insn) = -1; /* Force re-recognition. */
38672 df_insn_rescan (insn);
38674 if (dump_file)
38675 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
38678 /* Given INSN that contains a vector direct-splat operation, adjust the index
38679 of the source lane to account for the doubleword swap. */
38680 static void
38681 adjust_splat (rtx_insn *insn)
38683 rtx body = PATTERN (insn);
38684 rtx unspec = XEXP (body, 1);
38685 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
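/* In the splat patterns handled here (e.g. xxspltw), element 0 of the
   UNSPEC vector is the source register and element 1 is the source
   lane.  */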
38686 int lane = INTVAL (XVECEXP (unspec, 0, 1));
38687 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
38688 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
38689 INSN_CODE (insn) = -1; /* Force re-recognition. */
38690 df_insn_rescan (insn);
38692 if (dump_file)
38693 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
38696 /* Given INSN that contains an XXPERMDI operation (that is not a doubleword
38697 swap), reverse the order of the source operands and adjust the indices
38698 of the source lanes to account for doubleword reversal. */
38699 static void
38700 adjust_xxpermdi (rtx_insn *insn)
38702 rtx set = PATTERN (insn);
38703 rtx select = XEXP (set, 1);
38704 rtx concat = XEXP (select, 0);
38705 rtx src0 = XEXP (concat, 0);
38706 XEXP (concat, 0) = XEXP (concat, 1);
38707 XEXP (concat, 1) = src0;
38708 rtx parallel = XEXP (select, 1);
38709 int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
38710 int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
38711 int new_lane0 = 3 - lane1;
38712 int new_lane1 = 3 - lane0;
38713 XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
38714 XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
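/* Illustration: a selector of {0, 2} becomes {3 - 2, 3 - 0} = {1, 3}.
   With the sources reversed and each register holding a swapped image,
   the new selector picks the same underlying data, producing the
   doubleword-swapped result the web expects.  */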
38715 INSN_CODE (insn) = -1; /* Force re-recognition. */
38716 df_insn_rescan (insn);
38718 if (dump_file)
38719 fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
38722 /* Given INSN that contains a VEC_CONCAT operation of two doublewords,
38723 reverse the order of those inputs. */
38724 static void
38725 adjust_concat (rtx_insn *insn)
38727 rtx set = PATTERN (insn);
38728 rtx concat = XEXP (set, 1);
38729 rtx src0 = XEXP (concat, 0);
38730 XEXP (concat, 0) = XEXP (concat, 1);
38731 XEXP (concat, 1) = src0;
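/* The two inputs are doubleword scalars and hence are not themselves
   swapped; reversing their order directly produces the
   doubleword-swapped image that the rest of the web expects.  */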
38732 INSN_CODE (insn) = -1; /* Force re-recognition. */
38733 df_insn_rescan (insn);
38735 if (dump_file)
38736 fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
38739 /* Given an UNSPEC_VPERM insn, modify the mask loaded from the
38740 constant pool to reflect swapped doublewords. */
38741 static void
38742 adjust_vperm (rtx_insn *insn)
38744 /* We previously determined that the UNSPEC_VPERM was fed by a
38745 swap of a swapping load of a TOC-relative constant pool symbol.
38746 Find the MEM in the swapping load and replace it with a MEM for
38747 the adjusted mask constant. */
38748 rtx set = PATTERN (insn);
38749 rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);
38751 /* Find the swap. */
38752 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
38753 df_ref use;
38754 rtx_insn *swap_insn = 0;
38755 FOR_EACH_INSN_INFO_USE (use, insn_info)
38756 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
38758 struct df_link *def_link = DF_REF_CHAIN (use);
38759 gcc_assert (def_link && !def_link->next);
38760 swap_insn = DF_REF_INSN (def_link->ref);
38761 break;
38763 gcc_assert (swap_insn);
38765 /* Find the load. */
38766 insn_info = DF_INSN_INFO_GET (swap_insn);
38767 rtx_insn *load_insn = 0;
38768 FOR_EACH_INSN_INFO_USE (use, insn_info)
38770 struct df_link *def_link = DF_REF_CHAIN (use);
38771 gcc_assert (def_link && !def_link->next);
38772 load_insn = DF_REF_INSN (def_link->ref);
38773 break;
38775 gcc_assert (load_insn);
38777 /* Find the TOC-relative symbol access. */
38778 insn_info = DF_INSN_INFO_GET (load_insn);
38779 rtx_insn *tocrel_insn = 0;
38780 FOR_EACH_INSN_INFO_USE (use, insn_info)
38782 struct df_link *def_link = DF_REF_CHAIN (use);
38783 gcc_assert (def_link && !def_link->next);
38784 tocrel_insn = DF_REF_INSN (def_link->ref);
38785 break;
38787 gcc_assert (tocrel_insn);
38789 /* Find the embedded CONST_VECTOR. We have to call toc_relative_expr_p
38790 to set tocrel_base; otherwise it would be unnecessary as we've
38791 already established it will return true. */
38792 rtx base, offset;
38793 rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
38794 /* There is an extra level of indirection for small/large code models. */
38795 if (GET_CODE (tocrel_expr) == MEM)
38796 tocrel_expr = XEXP (tocrel_expr, 0);
38797 if (!toc_relative_expr_p (tocrel_expr, false))
38798 gcc_unreachable ();
38799 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
38800 rtx const_vector = get_pool_constant (base);
38801 /* With the extra indirection, get_pool_constant will produce the
38802 real constant from the reg_equal expression, so get the real
38803 constant. */
38804 if (GET_CODE (const_vector) == SYMBOL_REF)
38805 const_vector = get_pool_constant (const_vector);
38806 gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);
38808 /* Create an adjusted mask from the initial mask. */
38809 unsigned int new_mask[16], i, val;
38810 for (i = 0; i < 16; ++i) {
38811 val = INTVAL (XVECEXP (const_vector, 0, i));
38812 if (val < 16)
38813 new_mask[i] = (val + 8) % 16;
38814 else
38815 new_mask[i] = ((val + 8) % 16) + 16;
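/* For example, selector byte 3 (first input) becomes 11, and selector
   byte 27 (second input) becomes ((27 + 8) % 16) + 16 = 19.  */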
38818 /* Create a new CONST_VECTOR and a MEM that references it. */
38819 rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
38820 for (i = 0; i < 16; ++i)
38821 XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
38822 rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
38823 rtx new_mem = force_const_mem (V16QImode, new_const_vector);
38824 /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
38825 can't recognize. Force the SYMBOL_REF into a register. */
38826 if (!REG_P (XEXP (new_mem, 0))) {
38827 rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
38828 XEXP (new_mem, 0) = base_reg;
38829 /* Move the newly created insn ahead of the load insn. */
38830 rtx_insn *force_insn = get_last_insn ();
38831 remove_insn (force_insn);
38832 rtx_insn *before_load_insn = PREV_INSN (load_insn);
38833 add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
38834 df_insn_rescan (before_load_insn);
38835 df_insn_rescan (force_insn);
38838 /* Replace the MEM in the load instruction and rescan it. */
38839 XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
38840 INSN_CODE (load_insn) = -1; /* Force re-recognition. */
38841 df_insn_rescan (load_insn);
38843 if (dump_file)
38844 fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
38847 /* The insn described by INSN_ENTRY[I] can be swapped, but only
38848 with special handling. Take care of that here. */
38849 static void
38850 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
38852 rtx_insn *insn = insn_entry[i].insn;
38853 rtx body = PATTERN (insn);
38855 switch (insn_entry[i].special_handling)
38857 default:
38858 gcc_unreachable ();
38859 case SH_CONST_VECTOR:
38861 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
38862 gcc_assert (GET_CODE (body) == SET);
38863 rtx rhs = SET_SRC (body);
38864 swap_const_vector_halves (rhs);
38865 if (dump_file)
38866 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
38867 break;
38869 case SH_SUBREG:
38870 /* A subreg of the same size is already safe. For subregs that
38871 select a smaller portion of a reg, adjust the index for
38872 swapped doublewords. */
38873 adjust_subreg_index (body);
38874 if (dump_file)
38875 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
38876 break;
38877 case SH_NOSWAP_LD:
38878 /* Convert a non-permuting load to a permuting one. */
38879 permute_load (insn);
38880 break;
38881 case SH_NOSWAP_ST:
38882 /* Convert a non-permuting store to a permuting one. */
38883 permute_store (insn);
38884 break;
38885 case SH_EXTRACT:
38886 /* Change the lane on an extract operation. */
38887 adjust_extract (insn);
38888 break;
38889 case SH_SPLAT:
38890 /* Change the lane on a direct-splat operation. */
38891 adjust_splat (insn);
38892 break;
38893 case SH_XXPERMDI:
38894 /* Change the lanes on an XXPERMDI operation. */
38895 adjust_xxpermdi (insn);
38896 break;
38897 case SH_CONCAT:
38898 /* Reverse the order of a concatenation operation. */
38899 adjust_concat (insn);
38900 break;
38901 case SH_VPERM:
38902 /* Change the mask loaded from the constant pool for a VPERM. */
38903 adjust_vperm (insn);
38904 break;
38908 /* Find the insn from the Ith table entry, which is known to be a
38909 register swap Y = SWAP(X). Replace it with a copy Y = X. */
38910 static void
38911 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
38913 rtx_insn *insn = insn_entry[i].insn;
38914 rtx body = PATTERN (insn);
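/* A register swap has the shape (set DEST (vec_select SRC PAR)), so
   stripping the vec_select leaves the plain copy DEST = SRC.  */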
38915 rtx src_reg = XEXP (SET_SRC (body), 0);
38916 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
38917 rtx_insn *new_insn = emit_insn_before (copy, insn);
38918 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
38919 df_insn_rescan (new_insn);
38921 if (dump_file)
38923 unsigned int new_uid = INSN_UID (new_insn);
38924 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
38927 df_insn_delete (insn);
38928 remove_insn (insn);
38929 insn->set_deleted ();
38932 /* Dump the swap table to DUMP_FILE. */
38933 static void
38934 dump_swap_insn_table (swap_web_entry *insn_entry)
38936 int e = get_max_uid ();
38937 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
38939 for (int i = 0; i < e; ++i)
38940 if (insn_entry[i].is_relevant)
38942 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
38943 fprintf (dump_file, "%6d %6d ", i,
38944 pred_entry && pred_entry->insn
38945 ? INSN_UID (pred_entry->insn) : 0);
38946 if (insn_entry[i].is_load)
38947 fputs ("load ", dump_file);
38948 if (insn_entry[i].is_store)
38949 fputs ("store ", dump_file);
38950 if (insn_entry[i].is_swap)
38951 fputs ("swap ", dump_file);
38952 if (insn_entry[i].is_live_in)
38953 fputs ("live-in ", dump_file);
38954 if (insn_entry[i].is_live_out)
38955 fputs ("live-out ", dump_file);
38956 if (insn_entry[i].contains_subreg)
38957 fputs ("subreg ", dump_file);
38958 if (insn_entry[i].is_128_int)
38959 fputs ("int128 ", dump_file);
38960 if (insn_entry[i].is_call)
38961 fputs ("call ", dump_file);
38962 if (insn_entry[i].is_swappable)
38964 fputs ("swappable ", dump_file);
38965 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
38966 fputs ("special:constvec ", dump_file);
38967 else if (insn_entry[i].special_handling == SH_SUBREG)
38968 fputs ("special:subreg ", dump_file);
38969 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
38970 fputs ("special:load ", dump_file);
38971 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
38972 fputs ("special:store ", dump_file);
38973 else if (insn_entry[i].special_handling == SH_EXTRACT)
38974 fputs ("special:extract ", dump_file);
38975 else if (insn_entry[i].special_handling == SH_SPLAT)
38976 fputs ("special:splat ", dump_file);
38977 else if (insn_entry[i].special_handling == SH_XXPERMDI)
38978 fputs ("special:xxpermdi ", dump_file);
38979 else if (insn_entry[i].special_handling == SH_CONCAT)
38980 fputs ("special:concat ", dump_file);
38981 else if (insn_entry[i].special_handling == SH_VPERM)
38982 fputs ("special:vperm ", dump_file);
38984 if (insn_entry[i].web_not_optimizable)
38985 fputs ("unoptimizable ", dump_file);
38986 if (insn_entry[i].will_delete)
38987 fputs ("delete ", dump_file);
38988 fputs ("\n", dump_file);
38990 fputs ("\n", dump_file);
38993 /* Return ALIGN with its address canonicalized to (reg) or (plus (reg) (reg)).
38994 Here ALIGN is an (and ADDR (const_int -16)). Always return a new copy
38995 to avoid problems with combine. */
38996 static rtx
38997 alignment_with_canonical_addr (rtx align)
38999 rtx canon;
39000 rtx addr = XEXP (align, 0);
39002 if (REG_P (addr))
39003 canon = addr;
39005 else if (GET_CODE (addr) == PLUS)
39007 rtx addrop0 = XEXP (addr, 0);
39008 rtx addrop1 = XEXP (addr, 1);
39010 if (!REG_P (addrop0))
39011 addrop0 = force_reg (GET_MODE (addrop0), addrop0);
39013 if (!REG_P (addrop1))
39014 addrop1 = force_reg (GET_MODE (addrop1), addrop1);
39016 canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
39019 else
39020 canon = force_reg (GET_MODE (addr), addr);
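/* At this point CANON is either a (reg) or a (plus (reg) (reg)), so
   the AND built below is in one of the two canonical forms.  */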
39022 return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
39025 /* Check whether an rtx is an alignment mask, and if so, return
39026 a fully-expanded rtx for the masking operation. */
39027 static rtx
39028 alignment_mask (rtx_insn *insn)
39030 rtx body = PATTERN (insn);
39032 if (GET_CODE (body) != SET
39033 || GET_CODE (SET_SRC (body)) != AND
39034 || !REG_P (XEXP (SET_SRC (body), 0)))
39035 return 0;
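/* The mask may be either a literal -16 or a register; in the register
   case we must chase its unique definition below to confirm that it
   holds -16.  */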
39037 rtx mask = XEXP (SET_SRC (body), 1);
39039 if (GET_CODE (mask) == CONST_INT)
39041 if (INTVAL (mask) == -16)
39042 return alignment_with_canonical_addr (SET_SRC (body));
39043 else
39044 return 0;
39047 if (!REG_P (mask))
39048 return 0;
39050 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39051 df_ref use;
39052 rtx real_mask = 0;
39054 FOR_EACH_INSN_INFO_USE (use, insn_info)
39056 if (!rtx_equal_p (DF_REF_REG (use), mask))
39057 continue;
39059 struct df_link *def_link = DF_REF_CHAIN (use);
39060 if (!def_link || def_link->next)
39061 return 0;
39063 rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
39064 rtx const_body = PATTERN (const_insn);
39065 if (GET_CODE (const_body) != SET)
39066 return 0;
39068 real_mask = SET_SRC (const_body);
39070 if (GET_CODE (real_mask) != CONST_INT
39071 || INTVAL (real_mask) != -16)
39072 return 0;
39075 if (real_mask == 0)
39076 return 0;
39078 return alignment_with_canonical_addr (SET_SRC (body));
39081 /* Given INSN that's a load or store based at BASE_REG, look for a
39082 feeding computation that aligns its address on a 16-byte boundary. */
39083 static rtx
39084 find_alignment_op (rtx_insn *insn, rtx base_reg)
39086 df_ref base_use;
39087 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39088 rtx and_operation = 0;
39090 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
39092 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
39093 continue;
39095 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
39096 if (!base_def_link || base_def_link->next)
39097 break;
39099 rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
39100 and_operation = alignment_mask (and_insn);
39101 if (and_operation != 0)
39102 break;
39105 return and_operation;
39108 struct del_info { bool replace; rtx_insn *replace_insn; };
39110 /* If INSN is the load for an lvx pattern, put it in canonical form. */
39111 static void
39112 recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
39114 rtx body = PATTERN (insn);
39115 gcc_assert (GET_CODE (body) == SET
39116 && GET_CODE (SET_SRC (body)) == VEC_SELECT
39117 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);
39119 rtx mem = XEXP (SET_SRC (body), 0);
39120 rtx base_reg = XEXP (mem, 0);
39122 rtx and_operation = find_alignment_op (insn, base_reg);
39124 if (and_operation != 0)
39126 df_ref def;
39127 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39128 FOR_EACH_INSN_INFO_DEF (def, insn_info)
39130 struct df_link *link = DF_REF_CHAIN (def);
39131 if (!link || link->next)
39132 break;
39134 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
39135 if (!insn_is_swap_p (swap_insn)
39136 || insn_is_load_p (swap_insn)
39137 || insn_is_store_p (swap_insn))
39138 break;
39140 /* Expected lvx pattern found. Change the swap to
39141 a copy, and propagate the AND operation into the
39142 load. */
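/* Sketch of the rewrite: R = vec_select (mem (BASE), SWAP) followed by
   S = vec_select (R, SWAP) becomes R = mem (and (BASE) (const_int -16)),
   with the second insn later replaced by the copy S = R.  */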
39143 to_delete[INSN_UID (swap_insn)].replace = true;
39144 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
39146 XEXP (mem, 0) = and_operation;
39147 SET_SRC (body) = mem;
39148 INSN_CODE (insn) = -1; /* Force re-recognition. */
39149 df_insn_rescan (insn);
39151 if (dump_file)
39152 fprintf (dump_file, "lvx opportunity found at %d\n",
39153 INSN_UID (insn));
39158 /* If INSN is the store for an stvx pattern, put it in canonical form. */
39159 static void
39160 recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
39162 rtx body = PATTERN (insn);
39163 gcc_assert (GET_CODE (body) == SET
39164 && GET_CODE (SET_DEST (body)) == MEM
39165 && GET_CODE (SET_SRC (body)) == VEC_SELECT);
39166 rtx mem = SET_DEST (body);
39167 rtx base_reg = XEXP (mem, 0);
39169 rtx and_operation = find_alignment_op (insn, base_reg);
39171 if (and_operation != 0)
39173 rtx src_reg = XEXP (SET_SRC (body), 0);
39174 df_ref src_use;
39175 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39176 FOR_EACH_INSN_INFO_USE (src_use, insn_info)
39178 if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
39179 continue;
39181 struct df_link *link = DF_REF_CHAIN (src_use);
39182 if (!link || link->next)
39183 break;
39185 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
39186 if (!insn_is_swap_p (swap_insn)
39187 || insn_is_load_p (swap_insn)
39188 || insn_is_store_p (swap_insn))
39189 break;
39191 /* Expected stvx pattern found. Change the swap to
39192 a copy, and propagate the AND operation into the
39193 store. */
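/* Sketch of the rewrite: R = vec_select (S, SWAP) followed by
   (mem (BASE)) = vec_select (R, SWAP) becomes
   (mem (and (BASE) (const_int -16))) = R, with the first insn later
   replaced by the copy R = S.  */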
39194 to_delete[INSN_UID (swap_insn)].replace = true;
39195 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
39197 XEXP (mem, 0) = and_operation;
39198 SET_SRC (body) = src_reg;
39199 INSN_CODE (insn) = -1; /* Force re-recognition. */
39200 df_insn_rescan (insn);
39202 if (dump_file)
39203 fprintf (dump_file, "stvx opportunity found at %d\n",
39204 INSN_UID (insn));
39209 /* Look for patterns created from builtin lvx and stvx calls, and
39210 canonicalize them to be properly recognized as such. */
39211 static void
39212 recombine_lvx_stvx_patterns (function *fun)
39214 int i;
39215 basic_block bb;
39216 rtx_insn *insn;
39218 int num_insns = get_max_uid ();
39219 del_info *to_delete = XCNEWVEC (del_info, num_insns);
39221 FOR_ALL_BB_FN (bb, fun)
39222 FOR_BB_INSNS (bb, insn)
39224 if (!NONDEBUG_INSN_P (insn))
39225 continue;
39227 if (insn_is_load_p (insn) && insn_is_swap_p (insn))
39228 recombine_lvx_pattern (insn, to_delete);
39229 else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
39230 recombine_stvx_pattern (insn, to_delete);
39233 /* Turning swaps into copies is delayed until now, to avoid problems
39234 with deleting instructions during the insn walk. */
39235 for (i = 0; i < num_insns; i++)
39236 if (to_delete[i].replace)
39238 rtx swap_body = PATTERN (to_delete[i].replace_insn);
39239 rtx src_reg = XEXP (SET_SRC (swap_body), 0);
39240 rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
39241 rtx_insn *new_insn = emit_insn_before (copy,
39242 to_delete[i].replace_insn);
39243 set_block_for_insn (new_insn,
39244 BLOCK_FOR_INSN (to_delete[i].replace_insn));
39245 df_insn_rescan (new_insn);
39246 df_insn_delete (to_delete[i].replace_insn);
39247 remove_insn (to_delete[i].replace_insn);
39248 to_delete[i].replace_insn->set_deleted ();
39251 free (to_delete);
39254 /* Main entry point for this pass. */
39255 unsigned int
39256 rs6000_analyze_swaps (function *fun)
39258 swap_web_entry *insn_entry;
39259 basic_block bb;
39260 rtx_insn *insn, *curr_insn = 0;
39262 /* Dataflow analysis for use-def chains. */
39263 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
39264 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
39265 df_analyze ();
39266 df_set_flags (DF_DEFER_INSN_RESCAN);
39268 /* Pre-pass to recombine lvx and stvx patterns so we don't lose info. */
39269 recombine_lvx_stvx_patterns (fun);
39271 /* Allocate structure to represent webs of insns. */
39272 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
39274 /* Walk the insns to gather basic data. */
39275 FOR_ALL_BB_FN (bb, fun)
39276 FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
39278 unsigned int uid = INSN_UID (insn);
39279 if (NONDEBUG_INSN_P (insn))
39281 insn_entry[uid].insn = insn;
39283 if (GET_CODE (insn) == CALL_INSN)
39284 insn_entry[uid].is_call = 1;
39286 /* Walk the uses and defs to see if we mention vector regs.
39287 Record any constraints on optimization of such mentions. */
39288 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39289 df_ref mention;
39290 FOR_EACH_INSN_INFO_USE (mention, insn_info)
39292 /* We use DF_REF_REAL_REG here to get inside any subregs. */
39293 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
39295 /* If a use gets its value from a call insn, it will be
39296 a hard register and will look like (reg:V4SI 3 3).
39297 The df analysis creates two mentions for GPR3 and GPR4,
39298 both DImode. We must recognize this and treat it as a
39299 vector mention to ensure the call is unioned with this
39300 use. */
39301 if (mode == DImode && DF_REF_INSN_INFO (mention))
39303 rtx feeder = DF_REF_INSN (mention);
39304 /* FIXME: It is pretty hard to get from the df mention
39305 to the mode of the use in the insn. We arbitrarily
39306 pick a vector mode here, even though the use might
39307 be a real DImode. We can be too conservative
39308 (create a web larger than necessary) because of
39309 this, so consider eventually fixing this. */
39310 if (GET_CODE (feeder) == CALL_INSN)
39311 mode = V4SImode;
39314 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
39316 insn_entry[uid].is_relevant = 1;
39317 if (mode == TImode || mode == V1TImode
39318 || FLOAT128_VECTOR_P (mode))
39319 insn_entry[uid].is_128_int = 1;
39320 if (DF_REF_INSN_INFO (mention))
39321 insn_entry[uid].contains_subreg
39322 = !rtx_equal_p (DF_REF_REG (mention),
39323 DF_REF_REAL_REG (mention));
39324 union_defs (insn_entry, insn, mention);
39327 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
39329 /* We use DF_REF_REAL_REG here to get inside any subregs. */
39330 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
39332 /* If we're loading up a hard vector register for a call,
39333 it looks like (set (reg:V4SI 9 9) (...)). The df
39334 analysis creates two mentions for GPR9 and GPR10, both
39335 DImode. So relying on the mode from the mentions
39336 isn't sufficient to ensure we union the call into the
39337 web with the parameter setup code. */
39338 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
39339 && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (PATTERN (insn)))))
39340 mode = GET_MODE (SET_DEST (PATTERN (insn)));
39342 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
39344 insn_entry[uid].is_relevant = 1;
39345 if (mode == TImode || mode == V1TImode
39346 || FLOAT128_VECTOR_P (mode))
39347 insn_entry[uid].is_128_int = 1;
39348 if (DF_REF_INSN_INFO (mention))
39349 insn_entry[uid].contains_subreg
39350 = !rtx_equal_p (DF_REF_REG (mention),
39351 DF_REF_REAL_REG (mention));
39352 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
39353 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
39354 insn_entry[uid].is_live_out = 1;
39355 union_uses (insn_entry, insn, mention);
39359 if (insn_entry[uid].is_relevant)
39361 /* Determine if this is a load or store. */
39362 insn_entry[uid].is_load = insn_is_load_p (insn);
39363 insn_entry[uid].is_store = insn_is_store_p (insn);
39365 /* Determine if this is a doubleword swap. If not,
39366 determine whether it can legally be swapped. */
39367 if (insn_is_swap_p (insn))
39368 insn_entry[uid].is_swap = 1;
39369 else
39371 unsigned int special = SH_NONE;
39372 insn_entry[uid].is_swappable
39373 = insn_is_swappable_p (insn_entry, insn, &special);
39374 if (special != SH_NONE && insn_entry[uid].contains_subreg)
39375 insn_entry[uid].is_swappable = 0;
39376 else if (special != SH_NONE)
39377 insn_entry[uid].special_handling = special;
39378 else if (insn_entry[uid].contains_subreg)
39379 insn_entry[uid].special_handling = SH_SUBREG;
39385 if (dump_file)
39387 fprintf (dump_file, "\nSwap insn entry table when first built\n");
39388 dump_swap_insn_table (insn_entry);
39391 /* Record unoptimizable webs. */
39392 unsigned e = get_max_uid (), i;
39393 for (i = 0; i < e; ++i)
39395 if (!insn_entry[i].is_relevant)
39396 continue;
39398 swap_web_entry *root
39399 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
39401 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
39402 || (insn_entry[i].contains_subreg
39403 && insn_entry[i].special_handling != SH_SUBREG)
39404 || insn_entry[i].is_128_int || insn_entry[i].is_call
39405 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
39406 root->web_not_optimizable = 1;
39408 /* If we have loads or stores that aren't permuting then the
39409 optimization isn't appropriate. */
39410 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
39411 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
39412 root->web_not_optimizable = 1;
39414 /* If we have permuting loads or stores that are not accompanied
39415 by a register swap, the optimization isn't appropriate. */
39416 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
39418 rtx insn = insn_entry[i].insn;
39419 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39420 df_ref def;
39422 FOR_EACH_INSN_INFO_DEF (def, insn_info)
39424 struct df_link *link = DF_REF_CHAIN (def);
39426 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
39428 root->web_not_optimizable = 1;
39429 break;
39433 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
39435 rtx insn = insn_entry[i].insn;
39436 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
39437 df_ref use;
39439 FOR_EACH_INSN_INFO_USE (use, insn_info)
39441 struct df_link *link = DF_REF_CHAIN (use);
39443 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
39445 root->web_not_optimizable = 1;
39446 break;
39452 if (dump_file)
39454 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
39455 dump_swap_insn_table (insn_entry);
39458 /* For each load and store in an optimizable web (which implies
39459 the loads and stores are permuting), find the associated
39460 register swaps and mark them for removal. Due to various
39461 optimizations we may mark the same swap more than once. Also
39462 perform special handling for swappable insns that require it. */
39463 for (i = 0; i < e; ++i)
39464 if ((insn_entry[i].is_load || insn_entry[i].is_store)
39465 && insn_entry[i].is_swap)
39467 swap_web_entry* root_entry
39468 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
39469 if (!root_entry->web_not_optimizable)
39470 mark_swaps_for_removal (insn_entry, i);
39472 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
39474 swap_web_entry* root_entry
39475 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
39476 if (!root_entry->web_not_optimizable)
39477 handle_special_swappables (insn_entry, i);
39480 /* Now delete the swaps marked for removal. */
39481 for (i = 0; i < e; ++i)
39482 if (insn_entry[i].will_delete)
39483 replace_swap_with_copy (insn_entry, i);
39485 /* Clean up. */
39486 free (insn_entry);
39487 return 0;
39490 const pass_data pass_data_analyze_swaps =
39492 RTL_PASS, /* type */
39493 "swaps", /* name */
39494 OPTGROUP_NONE, /* optinfo_flags */
39495 TV_NONE, /* tv_id */
39496 0, /* properties_required */
39497 0, /* properties_provided */
39498 0, /* properties_destroyed */
39499 0, /* todo_flags_start */
39500 TODO_df_finish, /* todo_flags_finish */
39503 class pass_analyze_swaps : public rtl_opt_pass
39505 public:
39506 pass_analyze_swaps (gcc::context *ctxt)
39507 : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
39510 /* opt_pass methods: */
39511 virtual bool gate (function *)
39513 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
39514 && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
39517 virtual unsigned int execute (function *fun)
39519 return rs6000_analyze_swaps (fun);
39522 }; // class pass_analyze_swaps
39524 rtl_opt_pass *
39525 make_pass_analyze_swaps (gcc::context *ctxt)
39527 return new pass_analyze_swaps (ctxt);
39530 #ifdef RS6000_GLIBC_ATOMIC_FENV
39531 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39532 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39533 #endif
39535 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39537 static void
39538 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39540 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
39542 #ifdef RS6000_GLIBC_ATOMIC_FENV
39543 if (atomic_hold_decl == NULL_TREE)
39545 atomic_hold_decl
39546 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39547 get_identifier ("__atomic_feholdexcept"),
39548 build_function_type_list (void_type_node,
39549 double_ptr_type_node,
39550 NULL_TREE));
39551 TREE_PUBLIC (atomic_hold_decl) = 1;
39552 DECL_EXTERNAL (atomic_hold_decl) = 1;
39555 if (atomic_clear_decl == NULL_TREE)
39557 atomic_clear_decl
39558 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39559 get_identifier ("__atomic_feclearexcept"),
39560 build_function_type_list (void_type_node,
39561 NULL_TREE));
39562 TREE_PUBLIC (atomic_clear_decl) = 1;
39563 DECL_EXTERNAL (atomic_clear_decl) = 1;
39566 tree const_double = build_qualified_type (double_type_node,
39567 TYPE_QUAL_CONST);
39568 tree const_double_ptr = build_pointer_type (const_double);
39569 if (atomic_update_decl == NULL_TREE)
39571 atomic_update_decl
39572 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39573 get_identifier ("__atomic_feupdateenv"),
39574 build_function_type_list (void_type_node,
39575 const_double_ptr,
39576 NULL_TREE));
39577 TREE_PUBLIC (atomic_update_decl) = 1;
39578 DECL_EXTERNAL (atomic_update_decl) = 1;
39581 tree fenv_var = create_tmp_var_raw (double_type_node);
39582 TREE_ADDRESSABLE (fenv_var) = 1;
39583 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39585 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39586 *clear = build_call_expr (atomic_clear_decl, 0);
39587 *update = build_call_expr (atomic_update_decl, 1,
39588 fold_convert (const_double_ptr, fenv_addr));
39589 #endif
39590 return;
39593 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39594 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39595 tree call_mffs = build_call_expr (mffs, 0);
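/* A single __builtin_mffs call tree is shared by the hold, clear, and
   update sequences built below.  */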
39597 /* Generates the equivalent of feholdexcept (&fenv_var)
39599 fenv_var = __builtin_mffs ();
39600 double fenv_hold;
39601 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
39602 __builtin_mtfsf (0xff, fenv_hold); */
39604 /* Mask to clear everything except for the rounding modes and non-IEEE
39605 arithmetic flag. */
39606 const unsigned HOST_WIDE_INT hold_exception_mask =
39607 HOST_WIDE_INT_C (0xffffffff00000007);
39609 tree fenv_var = create_tmp_var_raw (double_type_node);
39611 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39613 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39614 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39615 build_int_cst (uint64_type_node,
39616 hold_exception_mask));
39618 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39619 fenv_llu_and);
39621 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39622 build_int_cst (unsigned_type_node, 0xff),
39623 fenv_hold_mtfsf);
39625 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39627 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39629 double fenv_clear = __builtin_mffs ();
39630 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39631 __builtin_mtfsf (0xff, fenv_clear); */
39633 /* Mask to clear everything in the low FPSCR word, including the
39634 rounding modes and non-IEEE arithmetic flag. */
39635 const unsigned HOST_WIDE_INT clear_exception_mask =
39636 HOST_WIDE_INT_C (0xffffffff00000000);
39638 tree fenv_clear = create_tmp_var_raw (double_type_node);
39640 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39642 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39643 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39644 fenv_clean_llu,
39645 build_int_cst (uint64_type_node,
39646 clear_exception_mask));
39648 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39649 fenv_clear_llu_and);
39651 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39652 build_int_cst (unsigned_type_node, 0xff),
39653 fenv_clear_mtfsf);
39655 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39657 /* Generates the equivalent of feupdateenv (&fenv_var)
39659 double old_fenv = __builtin_mffs ();
39660 double fenv_update;
39661 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39662 (*(uint64_t*)&fenv_var & 0x1ff80fffLL);
39663 __builtin_mtfsf (0xff, fenv_update); */
39665 const unsigned HOST_WIDE_INT update_exception_mask =
39666 HOST_WIDE_INT_C (0xffffffff1fffff00);
39667 const unsigned HOST_WIDE_INT new_exception_mask =
39668 HOST_WIDE_INT_C (0x1ff80fff);
39670 tree old_fenv = create_tmp_var_raw (double_type_node);
39671 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39673 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39674 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39675 build_int_cst (uint64_type_node,
39676 update_exception_mask));
39678 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39679 build_int_cst (uint64_type_node,
39680 new_exception_mask));
39682 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39683 old_llu_and, new_llu_and);
39685 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39686 new_llu_mask);
39688 tree update_mtfsf = build_call_expr (mtfsf, 2,
39689 build_int_cst (unsigned_type_node, 0xff),
39690 fenv_update_mtfsf);
39692 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39695 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39697 static bool
39698 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39699 optimization_type opt_type)
39701 switch (op)
39703 case rsqrt_optab:
39704 return (opt_type == OPTIMIZE_FOR_SPEED
39705 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39707 default:
39708 return true;
39712 struct gcc_target targetm = TARGET_INITIALIZER;
39714 #include "gt-rs6000.h"