/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-pass.h"
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#include "gstab.h"		/* for N_SLINE */
#include "case-cfn-macros.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate.  */
enum rs6000_recip_mask
{
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
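
/* Worked example (added for illustration, not part of the original table):
   with the entries above, -mrecip=divd,rsqrtf sets
   (RECIP_DF_DIV | RECIP_V2DF_DIV) == 0x00a and
   (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) == 0x050 in the reciprocal mask,
   while -mrecip=all sets RECIP_ALL.  */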
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;		/* 0 = AT_HWCAP, 1 = AT_HWCAP2.  */
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
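
/* Usage sketch (user code, not part of this file): the strings above are the
   names accepted by __builtin_cpu_is and __builtin_cpu_supports, e.g.

     if (__builtin_cpu_is ("power9") || __builtin_cpu_supports ("arch_3_00"))
       do_power9_version ();	// hypothetical user function
     else
       do_generic_version ();	// hypothetical user function

   Both builtins expand into reads of the values the C library stores in the
   TCB; see tcb_verification_symbol below.  */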
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
static bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive which types can do PRE_MODIFY instead of
   PRE_INC and PRE_DEC, so keep track of separate bits for these two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
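
/* Worked example (added for illustration): a mode that is valid in a
   register class and supports both reg+reg and reg+offset addressing there
   has an addr_mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) == 0x0d.  */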
/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx in_set, out_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Processor costs (relative to an add) */
const struct processor_costs *rs6000_cost;
/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
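
/* Note (added for illustration): rs6000_cost is pointed at one of the tables
   above while the command-line options are processed, so a -mtune=power9
   compilation ends up with the equivalent of

     rs6000_cost = &power9_cost;

   (a sketch; the actual selection happens in the option-override code, which
   is not part of this excerpt).  */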
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
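
/* Illustrative expansion (the exact entry is hypothetical, not quoted from
   rs6000-builtin.def): a line in the .def file such as

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST, CODE_FOR_addv16qi3)

   is expanded by the RS6000_BUILTIN_2 definition above into the initializer

     { "__builtin_altivec_vaddubm", CODE_FOR_addv16qi3,
       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST },

   i.e. one rs6000_builtin_info entry per builtin, indexed by the ENUM value.  */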
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							    machine_mode,
							    rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							    enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;			/* builtin function type */
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",    1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",   0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct",  0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,         0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
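/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001, matching the
   VRSAVE layout described above.  */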
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1771 #undef TARGET_BUILD_BUILTIN_VA_LIST
1772 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1774 #undef TARGET_EXPAND_BUILTIN_VA_START
1775 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1777 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1778 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1780 #undef TARGET_EH_RETURN_FILTER_MODE
1781 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1783 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1784 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1786 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1787 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1789 #undef TARGET_FLOATN_MODE
1790 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1792 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1793 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1795 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1796 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1798 #undef TARGET_MD_ASM_ADJUST
1799 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1801 #undef TARGET_OPTION_OVERRIDE
1802 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1804 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1805 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1806 rs6000_builtin_vectorized_function
1808 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1809 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1810 rs6000_builtin_md_vectorized_function
1812 #undef TARGET_STACK_PROTECT_GUARD
1813 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1816 #undef TARGET_STACK_PROTECT_FAIL
1817 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1821 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1822 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1825 /* Use a 32-bit anchor range. This leads to sequences like:
1827 addis tmp,anchor,high
1830 where tmp itself acts as an anchor, and can be shared between
1831 accesses to the same 64k page. */
1832 #undef TARGET_MIN_ANCHOR_OFFSET
1833 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1834 #undef TARGET_MAX_ANCHOR_OFFSET
1835 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1836 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1837 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1838 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1839 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1841 #undef TARGET_BUILTIN_RECIPROCAL
1842 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1844 #undef TARGET_SECONDARY_RELOAD
1845 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1846 #undef TARGET_SECONDARY_MEMORY_NEEDED
1847 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1848 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1849 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1851 #undef TARGET_LEGITIMATE_ADDRESS_P
1852 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1854 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1855 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1857 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1858 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1860 #undef TARGET_CAN_ELIMINATE
1861 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1863 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1864 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1866 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1867 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1869 #undef TARGET_TRAMPOLINE_INIT
1870 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1872 #undef TARGET_FUNCTION_VALUE
1873 #define TARGET_FUNCTION_VALUE rs6000_function_value
1875 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1876 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1878 #undef TARGET_OPTION_SAVE
1879 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1881 #undef TARGET_OPTION_RESTORE
1882 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1884 #undef TARGET_OPTION_PRINT
1885 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1887 #undef TARGET_CAN_INLINE_P
1888 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1890 #undef TARGET_SET_CURRENT_FUNCTION
1891 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1893 #undef TARGET_LEGITIMATE_CONSTANT_P
1894 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1896 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1897 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1899 #undef TARGET_CAN_USE_DOLOOP_P
1900 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1902 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1903 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1905 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1906 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1907 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1908 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1909 #undef TARGET_UNWIND_WORD_MODE
1910 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1912 #undef TARGET_OFFLOAD_OPTIONS
1913 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1915 #undef TARGET_C_MODE_FOR_SUFFIX
1916 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1918 #undef TARGET_INVALID_BINARY_OP
1919 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1921 #undef TARGET_OPTAB_SUPPORTED_P
1922 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1924 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1925 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1927 #undef TARGET_COMPARE_VERSION_PRIORITY
1928 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1930 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1931 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1932 rs6000_generate_version_dispatcher_body
1934 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1935 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1936 rs6000_get_function_versions_dispatcher
1938 #undef TARGET_OPTION_FUNCTION_VERSIONS
1939 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1941 #undef TARGET_HARD_REGNO_NREGS
1942 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1943 #undef TARGET_HARD_REGNO_MODE_OK
1944 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1946 #undef TARGET_MODES_TIEABLE_P
1947 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1949 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1950 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1951 rs6000_hard_regno_call_part_clobbered
1953 #undef TARGET_SLOW_UNALIGNED_ACCESS
1954 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1956 #undef TARGET_CAN_CHANGE_MODE_CLASS
1957 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1959 #undef TARGET_CONSTANT_ALIGNMENT
1960 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1962 #undef TARGET_STARTING_FRAME_OFFSET
1963 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1966 /* Processor table. */
1969 const char *const name
; /* Canonical processor name. */
1970 const enum processor_type processor
; /* Processor type enum value. */
1971 const HOST_WIDE_INT target_enable
; /* Target flags to enable. */
1974 static struct rs6000_ptt
const processor_target_table
[] =
1976 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1977 #include "rs6000-cpus.def"
1981 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1985 rs6000_cpu_name_lookup (const char *name
)
1991 for (i
= 0; i
< ARRAY_SIZE (processor_target_table
); i
++)
1992 if (! strcmp (name
, processor_target_table
[i
].name
))
2000 /* Return number of consecutive hard regs needed starting at reg REGNO
2001 to hold something of mode MODE.
2002 This is ordinarily the length in words of a value of mode MODE
2003 but can be less for certain modes in special long registers.
2005 POWER and PowerPC GPRs hold 32 bits worth;
2006 PowerPC64 GPRs and FPRs point register holds 64 bits worth. */
2009 rs6000_hard_regno_nregs_internal (int regno
, machine_mode mode
)
2011 unsigned HOST_WIDE_INT reg_size
;
2013 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2014 128-bit floating point that can go in vector registers, which has VSX
2015 memory addressing. */
2016 if (FP_REGNO_P (regno
))
2017 reg_size
= (VECTOR_MEM_VSX_P (mode
) || FLOAT128_VECTOR_P (mode
)
2018 ? UNITS_PER_VSX_WORD
2019 : UNITS_PER_FP_WORD
);
2021 else if (ALTIVEC_REGNO_P (regno
))
2022 reg_size
= UNITS_PER_ALTIVEC_WORD
;
2025 reg_size
= UNITS_PER_WORD
;
2027 return (GET_MODE_SIZE (mode
) + reg_size
- 1) / reg_size
;
2030 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2033 rs6000_hard_regno_mode_ok_uncached (int regno
, machine_mode mode
)
2035 int last_regno
= regno
+ rs6000_hard_regno_nregs
[mode
][regno
] - 1;
2037 if (COMPLEX_MODE_P (mode
))
2038 mode
= GET_MODE_INNER (mode
);
2040 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2041 register combinations, and use PTImode where we need to deal with quad
2042 word memory operations. Don't allow quad words in the argument or frame
2043 pointer registers, just registers 0..31. */
2044 if (mode
== PTImode
)
2045 return (IN_RANGE (regno
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
)
2046 && IN_RANGE (last_regno
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
)
2047 && ((regno
& 1) == 0));
2049 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2050 implementations. Don't allow an item to be split between a FP register
2051 and an Altivec register. Allow TImode in all VSX registers if the user
2053 if (TARGET_VSX
&& VSX_REGNO_P (regno
)
2054 && (VECTOR_MEM_VSX_P (mode
)
2055 || FLOAT128_VECTOR_P (mode
)
2056 || reg_addr
[mode
].scalar_in_vmx_p
2058 || (TARGET_VADDUQM
&& mode
== V1TImode
)))
2060 if (FP_REGNO_P (regno
))
2061 return FP_REGNO_P (last_regno
);
2063 if (ALTIVEC_REGNO_P (regno
))
2065 if (GET_MODE_SIZE (mode
) != 16 && !reg_addr
[mode
].scalar_in_vmx_p
)
2068 return ALTIVEC_REGNO_P (last_regno
);
2072 /* The GPRs can hold any mode, but values bigger than one register
2073 cannot go past R31. */
2074 if (INT_REGNO_P (regno
))
2075 return INT_REGNO_P (last_regno
);
2077 /* The float registers (except for VSX vector modes) can only hold floating
2078 modes and DImode. */
2079 if (FP_REGNO_P (regno
))
2081 if (FLOAT128_VECTOR_P (mode
))
2084 if (SCALAR_FLOAT_MODE_P (mode
)
2085 && (mode
!= TDmode
|| (regno
% 2) == 0)
2086 && FP_REGNO_P (last_regno
))
2089 if (GET_MODE_CLASS (mode
) == MODE_INT
)
2091 if(GET_MODE_SIZE (mode
) == UNITS_PER_FP_WORD
)
2094 if (TARGET_P8_VECTOR
&& (mode
== SImode
))
2097 if (TARGET_P9_VECTOR
&& (mode
== QImode
|| mode
== HImode
))
2101 if (PAIRED_SIMD_REGNO_P (regno
) && TARGET_PAIRED_FLOAT
2102 && PAIRED_VECTOR_MODE (mode
))
2108 /* The CR register can only hold CC modes. */
2109 if (CR_REGNO_P (regno
))
2110 return GET_MODE_CLASS (mode
) == MODE_CC
;
2112 if (CA_REGNO_P (regno
))
2113 return mode
== Pmode
|| mode
== SImode
;
2115 /* AltiVec only in AldyVec registers. */
2116 if (ALTIVEC_REGNO_P (regno
))
2117 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
)
2118 || mode
== V1TImode
);
2120 /* We cannot put non-VSX TImode or PTImode anywhere except general register
2121 and it must be able to fit within the register set. */
2123 return GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
;
2126 /* Implement TARGET_HARD_REGNO_NREGS. */
2129 rs6000_hard_regno_nregs_hook (unsigned int regno
, machine_mode mode
)
2131 return rs6000_hard_regno_nregs
[mode
][regno
];
2134 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2137 rs6000_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
2139 return rs6000_hard_regno_mode_ok_p
[mode
][regno
];
2142 /* Implement TARGET_MODES_TIEABLE_P.
2144 PTImode cannot tie with other modes because PTImode is restricted to even
2145 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2148 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2149 128-bit floating point on VSX systems ties with other vectors. */
2152 rs6000_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
2154 if (mode1
== PTImode
)
2155 return mode2
== PTImode
;
2156 if (mode2
== PTImode
)
2159 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1
))
2160 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2
);
2161 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2
))
2164 if (SCALAR_FLOAT_MODE_P (mode1
))
2165 return SCALAR_FLOAT_MODE_P (mode2
);
2166 if (SCALAR_FLOAT_MODE_P (mode2
))
2169 if (GET_MODE_CLASS (mode1
) == MODE_CC
)
2170 return GET_MODE_CLASS (mode2
) == MODE_CC
;
2171 if (GET_MODE_CLASS (mode2
) == MODE_CC
)
2174 if (PAIRED_VECTOR_MODE (mode1
))
2175 return PAIRED_VECTOR_MODE (mode2
);
2176 if (PAIRED_VECTOR_MODE (mode2
))
2182 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2185 rs6000_hard_regno_call_part_clobbered (unsigned int regno
, machine_mode mode
)
2189 && GET_MODE_SIZE (mode
) > 4
2190 && INT_REGNO_P (regno
))
2194 && FP_REGNO_P (regno
)
2195 && GET_MODE_SIZE (mode
) > 8
2196 && !FLOAT128_2REG_P (mode
))
2202 /* Print interesting facts about registers. */
2204 rs6000_debug_reg_print (int first_regno
, int last_regno
, const char *reg_name
)
2208 for (r
= first_regno
; r
<= last_regno
; ++r
)
2210 const char *comma
= "";
2213 if (first_regno
== last_regno
)
2214 fprintf (stderr
, "%s:\t", reg_name
);
2216 fprintf (stderr
, "%s%d:\t", reg_name
, r
- first_regno
);
2219 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2220 if (rs6000_hard_regno_mode_ok_p
[m
][r
] && rs6000_hard_regno_nregs
[m
][r
])
2224 fprintf (stderr
, ",\n\t");
2229 if (rs6000_hard_regno_nregs
[m
][r
] > 1)
2230 len
+= fprintf (stderr
, "%s%s/%d", comma
, GET_MODE_NAME (m
),
2231 rs6000_hard_regno_nregs
[m
][r
]);
2233 len
+= fprintf (stderr
, "%s%s", comma
, GET_MODE_NAME (m
));
2238 if (call_used_regs
[r
])
2242 fprintf (stderr
, ",\n\t");
2247 len
+= fprintf (stderr
, "%s%s", comma
, "call-used");
2255 fprintf (stderr
, ",\n\t");
2260 len
+= fprintf (stderr
, "%s%s", comma
, "fixed");
2266 fprintf (stderr
, ",\n\t");
2270 len
+= fprintf (stderr
, "%sreg-class = %s", comma
,
2271 reg_class_names
[(int)rs6000_regno_regclass
[r
]]);
2276 fprintf (stderr
, ",\n\t");
2280 fprintf (stderr
, "%sregno = %d\n", comma
, r
);
2285 rs6000_debug_vector_unit (enum rs6000_vector v
)
2291 case VECTOR_NONE
: ret
= "none"; break;
2292 case VECTOR_ALTIVEC
: ret
= "altivec"; break;
2293 case VECTOR_VSX
: ret
= "vsx"; break;
2294 case VECTOR_P8_VECTOR
: ret
= "p8_vector"; break;
2295 case VECTOR_PAIRED
: ret
= "paired"; break;
2296 case VECTOR_OTHER
: ret
= "other"; break;
2297 default: ret
= "unknown"; break;
2303 /* Inner function printing just the address mask for a particular reload
2305 DEBUG_FUNCTION
char *
2306 rs6000_debug_addr_mask (addr_mask_type mask
, bool keep_spaces
)
2311 if ((mask
& RELOAD_REG_VALID
) != 0)
2313 else if (keep_spaces
)
2316 if ((mask
& RELOAD_REG_MULTIPLE
) != 0)
2318 else if (keep_spaces
)
2321 if ((mask
& RELOAD_REG_INDEXED
) != 0)
2323 else if (keep_spaces
)
2326 if ((mask
& RELOAD_REG_QUAD_OFFSET
) != 0)
2328 else if ((mask
& RELOAD_REG_OFFSET
) != 0)
2330 else if (keep_spaces
)
2333 if ((mask
& RELOAD_REG_PRE_INCDEC
) != 0)
2335 else if (keep_spaces
)
2338 if ((mask
& RELOAD_REG_PRE_MODIFY
) != 0)
2340 else if (keep_spaces
)
2343 if ((mask
& RELOAD_REG_AND_M16
) != 0)
2345 else if (keep_spaces
)
2353 /* Print the address masks in a human readble fashion. */
2355 rs6000_debug_print_mode (ssize_t m
)
2361 fprintf (stderr
, "Mode: %-5s", GET_MODE_NAME (m
));
2362 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2363 fprintf (stderr
, " %s: %s", reload_reg_map
[rc
].name
,
2364 rs6000_debug_addr_mask (reg_addr
[m
].addr_mask
[rc
], true));
2366 if ((reg_addr
[m
].reload_store
!= CODE_FOR_nothing
)
2367 || (reg_addr
[m
].reload_load
!= CODE_FOR_nothing
))
2368 fprintf (stderr
, " Reload=%c%c",
2369 (reg_addr
[m
].reload_store
!= CODE_FOR_nothing
) ? 's' : '*',
2370 (reg_addr
[m
].reload_load
!= CODE_FOR_nothing
) ? 'l' : '*');
2372 spaces
+= sizeof (" Reload=sl") - 1;
2374 if (reg_addr
[m
].scalar_in_vmx_p
)
2376 fprintf (stderr
, "%*s Upper=y", spaces
, "");
2380 spaces
+= sizeof (" Upper=y") - 1;
2382 fuse_extra_p
= ((reg_addr
[m
].fusion_gpr_ld
!= CODE_FOR_nothing
)
2383 || reg_addr
[m
].fused_toc
);
2386 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2388 if (rc
!= RELOAD_REG_ANY
)
2390 if (reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
2391 || reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
2392 || reg_addr
[m
].fusion_addi_st
[rc
] != CODE_FOR_nothing
2393 || reg_addr
[m
].fusion_addis_ld
[rc
] != CODE_FOR_nothing
2394 || reg_addr
[m
].fusion_addis_st
[rc
] != CODE_FOR_nothing
)
2396 fuse_extra_p
= true;
2405 fprintf (stderr
, "%*s Fuse:", spaces
, "");
2408 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2410 if (rc
!= RELOAD_REG_ANY
)
2414 if (reg_addr
[m
].fusion_addis_ld
[rc
] != CODE_FOR_nothing
)
2416 else if (reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
)
2421 if (reg_addr
[m
].fusion_addis_st
[rc
] != CODE_FOR_nothing
)
2423 else if (reg_addr
[m
].fusion_addi_st
[rc
] != CODE_FOR_nothing
)
2428 if (load
== '-' && store
== '-')
2432 fprintf (stderr
, "%*s%c=%c%c", (spaces
+ 1), "",
2433 reload_reg_map
[rc
].name
[0], load
, store
);
2439 if (reg_addr
[m
].fusion_gpr_ld
!= CODE_FOR_nothing
)
2441 fprintf (stderr
, "%*sP8gpr", (spaces
+ 1), "");
2445 spaces
+= sizeof (" P8gpr") - 1;
2447 if (reg_addr
[m
].fused_toc
)
2449 fprintf (stderr
, "%*sToc", (spaces
+ 1), "");
2453 spaces
+= sizeof (" Toc") - 1;
2456 spaces
+= sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2458 if (rs6000_vector_unit
[m
] != VECTOR_NONE
2459 || rs6000_vector_mem
[m
] != VECTOR_NONE
)
2461 fprintf (stderr
, "%*s vector: arith=%-10s mem=%s",
2463 rs6000_debug_vector_unit (rs6000_vector_unit
[m
]),
2464 rs6000_debug_vector_unit (rs6000_vector_mem
[m
]));
2467 fputs ("\n", stderr
);
2470 #define DEBUG_FMT_ID "%-32s= "
2471 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2472 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2473 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2475 /* Print various interesting information with -mdebug=reg. */
2477 rs6000_debug_reg_global (void)
2479 static const char *const tf
[2] = { "false", "true" };
2480 const char *nl
= (const char *)0;
2483 char costly_num
[20];
2485 char flags_buffer
[40];
2486 const char *costly_str
;
2487 const char *nop_str
;
2488 const char *trace_str
;
2489 const char *abi_str
;
2490 const char *cmodel_str
;
2491 struct cl_target_option cl_opts
;
2493 /* Modes we want tieable information on. */
2494 static const machine_mode print_tieable_modes
[] = {
2530 /* Virtual regs we are interested in. */
2531 const static struct {
2532 int regno
; /* register number. */
2533 const char *name
; /* register name. */
2534 } virtual_regs
[] = {
2535 { STACK_POINTER_REGNUM
, "stack pointer:" },
2536 { TOC_REGNUM
, "toc: " },
2537 { STATIC_CHAIN_REGNUM
, "static chain: " },
2538 { RS6000_PIC_OFFSET_TABLE_REGNUM
, "pic offset: " },
2539 { HARD_FRAME_POINTER_REGNUM
, "hard frame: " },
2540 { ARG_POINTER_REGNUM
, "arg pointer: " },
2541 { FRAME_POINTER_REGNUM
, "frame pointer:" },
2542 { FIRST_PSEUDO_REGISTER
, "first pseudo: " },
2543 { FIRST_VIRTUAL_REGISTER
, "first virtual:" },
2544 { VIRTUAL_INCOMING_ARGS_REGNUM
, "incoming_args:" },
2545 { VIRTUAL_STACK_VARS_REGNUM
, "stack_vars: " },
2546 { VIRTUAL_STACK_DYNAMIC_REGNUM
, "stack_dynamic:" },
2547 { VIRTUAL_OUTGOING_ARGS_REGNUM
, "outgoing_args:" },
2548 { VIRTUAL_CFA_REGNUM
, "cfa (frame): " },
2549 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM
, "stack boundry:" },
2550 { LAST_VIRTUAL_REGISTER
, "last virtual: " },
2553 fputs ("\nHard register information:\n", stderr
);
2554 rs6000_debug_reg_print (FIRST_GPR_REGNO
, LAST_GPR_REGNO
, "gr");
2555 rs6000_debug_reg_print (FIRST_FPR_REGNO
, LAST_FPR_REGNO
, "fp");
2556 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO
,
2559 rs6000_debug_reg_print (LR_REGNO
, LR_REGNO
, "lr");
2560 rs6000_debug_reg_print (CTR_REGNO
, CTR_REGNO
, "ctr");
2561 rs6000_debug_reg_print (CR0_REGNO
, CR7_REGNO
, "cr");
2562 rs6000_debug_reg_print (CA_REGNO
, CA_REGNO
, "ca");
2563 rs6000_debug_reg_print (VRSAVE_REGNO
, VRSAVE_REGNO
, "vrsave");
2564 rs6000_debug_reg_print (VSCR_REGNO
, VSCR_REGNO
, "vscr");
2566 fputs ("\nVirtual/stack/frame registers:\n", stderr
);
2567 for (v
= 0; v
< ARRAY_SIZE (virtual_regs
); v
++)
2568 fprintf (stderr
, "%s regno = %3d\n", virtual_regs
[v
].name
, virtual_regs
[v
].regno
);
2572 "d reg_class = %s\n"
2573 "f reg_class = %s\n"
2574 "v reg_class = %s\n"
2575 "wa reg_class = %s\n"
2576 "wb reg_class = %s\n"
2577 "wd reg_class = %s\n"
2578 "we reg_class = %s\n"
2579 "wf reg_class = %s\n"
2580 "wg reg_class = %s\n"
2581 "wh reg_class = %s\n"
2582 "wi reg_class = %s\n"
2583 "wj reg_class = %s\n"
2584 "wk reg_class = %s\n"
2585 "wl reg_class = %s\n"
2586 "wm reg_class = %s\n"
2587 "wo reg_class = %s\n"
2588 "wp reg_class = %s\n"
2589 "wq reg_class = %s\n"
2590 "wr reg_class = %s\n"
2591 "ws reg_class = %s\n"
2592 "wt reg_class = %s\n"
2593 "wu reg_class = %s\n"
2594 "wv reg_class = %s\n"
2595 "ww reg_class = %s\n"
2596 "wx reg_class = %s\n"
2597 "wy reg_class = %s\n"
2598 "wz reg_class = %s\n"
2599 "wA reg_class = %s\n"
2600 "wH reg_class = %s\n"
2601 "wI reg_class = %s\n"
2602 "wJ reg_class = %s\n"
2603 "wK reg_class = %s\n"
2605 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_d
]],
2606 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_f
]],
2607 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_v
]],
2608 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wa
]],
2609 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wb
]],
2610 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wd
]],
2611 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_we
]],
2612 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wf
]],
2613 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wg
]],
2614 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wh
]],
2615 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wi
]],
2616 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wj
]],
2617 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wk
]],
2618 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wl
]],
2619 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wm
]],
2620 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wo
]],
2621 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wp
]],
2622 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wq
]],
2623 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wr
]],
2624 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ws
]],
2625 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wt
]],
2626 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wu
]],
2627 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wv
]],
2628 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ww
]],
2629 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wx
]],
2630 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wy
]],
2631 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wz
]],
2632 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wA
]],
2633 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wH
]],
2634 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wI
]],
2635 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wJ
]],
2636 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wK
]]);
2639 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2640 rs6000_debug_print_mode (m
);
2642 fputs ("\n", stderr
);
2644 for (m1
= 0; m1
< ARRAY_SIZE (print_tieable_modes
); m1
++)
2646 machine_mode mode1
= print_tieable_modes
[m1
];
2647 bool first_time
= true;
2649 nl
= (const char *)0;
2650 for (m2
= 0; m2
< ARRAY_SIZE (print_tieable_modes
); m2
++)
2652 machine_mode mode2
= print_tieable_modes
[m2
];
2653 if (mode1
!= mode2
&& rs6000_modes_tieable_p (mode1
, mode2
))
2657 fprintf (stderr
, "Tieable modes %s:", GET_MODE_NAME (mode1
));
2662 fprintf (stderr
, " %s", GET_MODE_NAME (mode2
));
2667 fputs ("\n", stderr
);
2673 if (rs6000_recip_control
)
2675 fprintf (stderr
, "\nReciprocal mask = 0x%x\n", rs6000_recip_control
);
2677 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2678 if (rs6000_recip_bits
[m
])
2681 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2683 (RS6000_RECIP_AUTO_RE_P (m
)
2685 : (RS6000_RECIP_HAVE_RE_P (m
) ? "have" : "none")),
2686 (RS6000_RECIP_AUTO_RSQRTE_P (m
)
2688 : (RS6000_RECIP_HAVE_RSQRTE_P (m
) ? "have" : "none")));
2691 fputs ("\n", stderr
);
2694 if (rs6000_cpu_index
>= 0)
2696 const char *name
= processor_target_table
[rs6000_cpu_index
].name
;
2698 = processor_target_table
[rs6000_cpu_index
].target_enable
;
2700 sprintf (flags_buffer
, "-mcpu=%s flags", name
);
2701 rs6000_print_isa_options (stderr
, 0, flags_buffer
, flags
);
2704 fprintf (stderr
, DEBUG_FMT_S
, "cpu", "<none>");
2706 if (rs6000_tune_index
>= 0)
2708 const char *name
= processor_target_table
[rs6000_tune_index
].name
;
2710 = processor_target_table
[rs6000_tune_index
].target_enable
;
2712 sprintf (flags_buffer
, "-mtune=%s flags", name
);
2713 rs6000_print_isa_options (stderr
, 0, flags_buffer
, flags
);
2716 fprintf (stderr
, DEBUG_FMT_S
, "tune", "<none>");
2718 cl_target_option_save (&cl_opts
, &global_options
);
2719 rs6000_print_isa_options (stderr
, 0, "rs6000_isa_flags",
2722 rs6000_print_isa_options (stderr
, 0, "rs6000_isa_flags_explicit",
2723 rs6000_isa_flags_explicit
);
2725 rs6000_print_builtin_options (stderr
, 0, "rs6000_builtin_mask",
2726 rs6000_builtin_mask
);
2728 rs6000_print_isa_options (stderr
, 0, "TARGET_DEFAULT", TARGET_DEFAULT
);
2730 fprintf (stderr
, DEBUG_FMT_S
, "--with-cpu default",
2731 OPTION_TARGET_CPU_DEFAULT
? OPTION_TARGET_CPU_DEFAULT
: "<none>");
2733 switch (rs6000_sched_costly_dep
)
2735 case max_dep_latency
:
2736 costly_str
= "max_dep_latency";
2740 costly_str
= "no_dep_costly";
2743 case all_deps_costly
:
2744 costly_str
= "all_deps_costly";
2747 case true_store_to_load_dep_costly
:
2748 costly_str
= "true_store_to_load_dep_costly";
2751 case store_to_load_dep_costly
:
2752 costly_str
= "store_to_load_dep_costly";
2756 costly_str
= costly_num
;
2757 sprintf (costly_num
, "%d", (int)rs6000_sched_costly_dep
);
2761 fprintf (stderr
, DEBUG_FMT_S
, "sched_costly_dep", costly_str
);
2763 switch (rs6000_sched_insert_nops
)
2765 case sched_finish_regroup_exact
:
2766 nop_str
= "sched_finish_regroup_exact";
2769 case sched_finish_pad_groups
:
2770 nop_str
= "sched_finish_pad_groups";
2773 case sched_finish_none
:
2774 nop_str
= "sched_finish_none";
2779 sprintf (nop_num
, "%d", (int)rs6000_sched_insert_nops
);
2783 fprintf (stderr
, DEBUG_FMT_S
, "sched_insert_nops", nop_str
);
2785 switch (rs6000_sdata
)
2792 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "data");
2796 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "sysv");
2800 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "eabi");
2805 switch (rs6000_traceback
)
2807 case traceback_default
: trace_str
= "default"; break;
2808 case traceback_none
: trace_str
= "none"; break;
2809 case traceback_part
: trace_str
= "part"; break;
2810 case traceback_full
: trace_str
= "full"; break;
2811 default: trace_str
= "unknown"; break;
2814 fprintf (stderr
, DEBUG_FMT_S
, "traceback", trace_str
);
2816 switch (rs6000_current_cmodel
)
2818 case CMODEL_SMALL
: cmodel_str
= "small"; break;
2819 case CMODEL_MEDIUM
: cmodel_str
= "medium"; break;
2820 case CMODEL_LARGE
: cmodel_str
= "large"; break;
2821 default: cmodel_str
= "unknown"; break;
2824 fprintf (stderr
, DEBUG_FMT_S
, "cmodel", cmodel_str
);
2826 switch (rs6000_current_abi
)
2828 case ABI_NONE
: abi_str
= "none"; break;
2829 case ABI_AIX
: abi_str
= "aix"; break;
2830 case ABI_ELFv2
: abi_str
= "ELFv2"; break;
2831 case ABI_V4
: abi_str
= "V4"; break;
2832 case ABI_DARWIN
: abi_str
= "darwin"; break;
2833 default: abi_str
= "unknown"; break;
2836 fprintf (stderr
, DEBUG_FMT_S
, "abi", abi_str
);
2838 if (rs6000_altivec_abi
)
2839 fprintf (stderr
, DEBUG_FMT_S
, "altivec_abi", "true");
2841 if (rs6000_darwin64_abi
)
2842 fprintf (stderr
, DEBUG_FMT_S
, "darwin64_abi", "true");
2844 fprintf (stderr
, DEBUG_FMT_S
, "single_float",
2845 (TARGET_SINGLE_FLOAT
? "true" : "false"));
2847 fprintf (stderr
, DEBUG_FMT_S
, "double_float",
2848 (TARGET_DOUBLE_FLOAT
? "true" : "false"));
2850 fprintf (stderr
, DEBUG_FMT_S
, "soft_float",
2851 (TARGET_SOFT_FLOAT
? "true" : "false"));
2853 if (TARGET_LINK_STACK
)
2854 fprintf (stderr
, DEBUG_FMT_S
, "link_stack", "true");
2856 if (TARGET_P8_FUSION
)
2860 strcpy (options
, (TARGET_P9_FUSION
) ? "power9" : "power8");
2861 if (TARGET_TOC_FUSION
)
2862 strcat (options
, ", toc");
2864 if (TARGET_P8_FUSION_SIGN
)
2865 strcat (options
, ", sign");
2867 fprintf (stderr
, DEBUG_FMT_S
, "fusion", options
);
2870 fprintf (stderr
, DEBUG_FMT_S
, "plt-format",
2871 TARGET_SECURE_PLT
? "secure" : "bss");
2872 fprintf (stderr
, DEBUG_FMT_S
, "struct-return",
2873 aix_struct_return
? "aix" : "sysv");
2874 fprintf (stderr
, DEBUG_FMT_S
, "always_hint", tf
[!!rs6000_always_hint
]);
2875 fprintf (stderr
, DEBUG_FMT_S
, "sched_groups", tf
[!!rs6000_sched_groups
]);
2876 fprintf (stderr
, DEBUG_FMT_S
, "align_branch",
2877 tf
[!!rs6000_align_branch_targets
]);
2878 fprintf (stderr
, DEBUG_FMT_D
, "tls_size", rs6000_tls_size
);
2879 fprintf (stderr
, DEBUG_FMT_D
, "long_double_size",
2880 rs6000_long_double_type_size
);
2881 fprintf (stderr
, DEBUG_FMT_D
, "sched_restricted_insns_priority",
2882 (int)rs6000_sched_restricted_insns_priority
);
2883 fprintf (stderr
, DEBUG_FMT_D
, "Number of standard builtins",
2885 fprintf (stderr
, DEBUG_FMT_D
, "Number of rs6000 builtins",
2886 (int)RS6000_BUILTIN_COUNT
);
2888 fprintf (stderr
, DEBUG_FMT_D
, "Enable float128 on VSX",
2889 (int)TARGET_FLOAT128_ENABLE_TYPE
);
2892 fprintf (stderr
, DEBUG_FMT_D
, "VSX easy 64-bit scalar element",
2893 (int)VECTOR_ELEMENT_SCALAR_64BIT
);
2895 if (TARGET_DIRECT_MOVE_128
)
2896 fprintf (stderr
, DEBUG_FMT_D
, "VSX easy 64-bit mfvsrld element",
2897 (int)VECTOR_ELEMENT_MFVSRLD_64BIT
);
2901 /* Update the addr mask bits in reg_addr to help secondary reload and go if
2902 legitimate address support to figure out the appropriate addressing to
2906 rs6000_setup_reg_addr_masks (void)
2908 ssize_t rc
, reg
, m
, nregs
;
2909 addr_mask_type any_addr_mask
, addr_mask
;
2911 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2913 machine_mode m2
= (machine_mode
) m
;
2914 bool complex_p
= false;
2915 bool small_int_p
= (m2
== QImode
|| m2
== HImode
|| m2
== SImode
);
2918 if (COMPLEX_MODE_P (m2
))
2921 m2
= GET_MODE_INNER (m2
);
2924 msize
= GET_MODE_SIZE (m2
);
2926 /* SDmode is special in that we want to access it only via REG+REG
2927 addressing on power7 and above, since we want to use the LFIWZX and
2928 STFIWZX instructions to load it. */
2929 bool indexed_only_p
= (m
== SDmode
&& TARGET_NO_SDMODE_STACK
);
2932 for (rc
= FIRST_RELOAD_REG_CLASS
; rc
<= LAST_RELOAD_REG_CLASS
; rc
++)
2935 reg
= reload_reg_map
[rc
].reg
;
2937 /* Can mode values go in the GPR/FPR/Altivec registers? */
2938 if (reg
>= 0 && rs6000_hard_regno_mode_ok_p
[m
][reg
])
2940 bool small_int_vsx_p
= (small_int_p
2941 && (rc
== RELOAD_REG_FPR
2942 || rc
== RELOAD_REG_VMX
));
2944 nregs
= rs6000_hard_regno_nregs
[m
][reg
];
2945 addr_mask
|= RELOAD_REG_VALID
;
2947 /* Indicate if the mode takes more than 1 physical register. If
2948 it takes a single register, indicate it can do REG+REG
2949 addressing. Small integers in VSX registers can only do
2950 REG+REG addressing. */
2951 if (small_int_vsx_p
)
2952 addr_mask
|= RELOAD_REG_INDEXED
;
2953 else if (nregs
> 1 || m
== BLKmode
|| complex_p
)
2954 addr_mask
|= RELOAD_REG_MULTIPLE
;
2956 addr_mask
|= RELOAD_REG_INDEXED
;
2958 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2959 addressing. If we allow scalars into Altivec registers,
2960 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2963 && (rc
== RELOAD_REG_GPR
|| rc
== RELOAD_REG_FPR
)
2965 && !VECTOR_MODE_P (m2
)
2966 && !FLOAT128_VECTOR_P (m2
)
2968 && !small_int_vsx_p
)
2970 addr_mask
|= RELOAD_REG_PRE_INCDEC
;
2972 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2973 we don't allow PRE_MODIFY for some multi-register
2978 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2982 if (TARGET_POWERPC64
)
2983 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2989 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2995 /* GPR and FPR registers can do REG+OFFSET addressing, except
2996 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2997 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2998 if ((addr_mask
!= 0) && !indexed_only_p
3000 && (rc
== RELOAD_REG_GPR
3001 || ((msize
== 8 || m2
== SFmode
)
3002 && (rc
== RELOAD_REG_FPR
3003 || (rc
== RELOAD_REG_VMX
&& TARGET_P9_VECTOR
)))))
3004 addr_mask
|= RELOAD_REG_OFFSET
;
3006 /* VSX registers can do REG+OFFSET addresssing if ISA 3.0
3007 instructions are enabled. The offset for 128-bit VSX registers is
3008 only 12-bits. While GPRs can handle the full offset range, VSX
3009 registers can only handle the restricted range. */
3010 else if ((addr_mask
!= 0) && !indexed_only_p
3011 && msize
== 16 && TARGET_P9_VECTOR
3012 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2
)
3013 || (m2
== TImode
&& TARGET_VSX
)))
3015 addr_mask
|= RELOAD_REG_OFFSET
;
3016 if (rc
== RELOAD_REG_FPR
|| rc
== RELOAD_REG_VMX
)
3017 addr_mask
|= RELOAD_REG_QUAD_OFFSET
;
3020 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3021 addressing on 128-bit types. */
3022 if (rc
== RELOAD_REG_VMX
&& msize
== 16
3023 && (addr_mask
& RELOAD_REG_VALID
) != 0)
3024 addr_mask
|= RELOAD_REG_AND_M16
;
3026 reg_addr
[m
].addr_mask
[rc
] = addr_mask
;
3027 any_addr_mask
|= addr_mask
;
3030 reg_addr
[m
].addr_mask
[RELOAD_REG_ANY
] = any_addr_mask
;
3035 /* Initialize the various global tables that are based on register size. */
3037 rs6000_init_hard_regno_mode_ok (bool global_init_p
)
3043 /* Precalculate REGNO_REG_CLASS. */
3044 rs6000_regno_regclass
[0] = GENERAL_REGS
;
3045 for (r
= 1; r
< 32; ++r
)
3046 rs6000_regno_regclass
[r
] = BASE_REGS
;
3048 for (r
= 32; r
< 64; ++r
)
3049 rs6000_regno_regclass
[r
] = FLOAT_REGS
;
3051 for (r
= 64; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3052 rs6000_regno_regclass
[r
] = NO_REGS
;
3054 for (r
= FIRST_ALTIVEC_REGNO
; r
<= LAST_ALTIVEC_REGNO
; ++r
)
3055 rs6000_regno_regclass
[r
] = ALTIVEC_REGS
;
3057 rs6000_regno_regclass
[CR0_REGNO
] = CR0_REGS
;
3058 for (r
= CR1_REGNO
; r
<= CR7_REGNO
; ++r
)
3059 rs6000_regno_regclass
[r
] = CR_REGS
;
3061 rs6000_regno_regclass
[LR_REGNO
] = LINK_REGS
;
3062 rs6000_regno_regclass
[CTR_REGNO
] = CTR_REGS
;
3063 rs6000_regno_regclass
[CA_REGNO
] = NO_REGS
;
3064 rs6000_regno_regclass
[VRSAVE_REGNO
] = VRSAVE_REGS
;
3065 rs6000_regno_regclass
[VSCR_REGNO
] = VRSAVE_REGS
;
3066 rs6000_regno_regclass
[TFHAR_REGNO
] = SPR_REGS
;
3067 rs6000_regno_regclass
[TFIAR_REGNO
] = SPR_REGS
;
3068 rs6000_regno_regclass
[TEXASR_REGNO
] = SPR_REGS
;
3069 rs6000_regno_regclass
[ARG_POINTER_REGNUM
] = BASE_REGS
;
3070 rs6000_regno_regclass
[FRAME_POINTER_REGNUM
] = BASE_REGS
;
3072 /* Precalculate register class to simpler reload register class. We don't
3073 need all of the register classes that are combinations of different
3074 classes, just the simple ones that have constraint letters. */
3075 for (c
= 0; c
< N_REG_CLASSES
; c
++)
3076 reg_class_to_reg_type
[c
] = NO_REG_TYPE
;
3078 reg_class_to_reg_type
[(int)GENERAL_REGS
] = GPR_REG_TYPE
;
3079 reg_class_to_reg_type
[(int)BASE_REGS
] = GPR_REG_TYPE
;
3080 reg_class_to_reg_type
[(int)VSX_REGS
] = VSX_REG_TYPE
;
3081 reg_class_to_reg_type
[(int)VRSAVE_REGS
] = SPR_REG_TYPE
;
3082 reg_class_to_reg_type
[(int)VSCR_REGS
] = SPR_REG_TYPE
;
3083 reg_class_to_reg_type
[(int)LINK_REGS
] = SPR_REG_TYPE
;
3084 reg_class_to_reg_type
[(int)CTR_REGS
] = SPR_REG_TYPE
;
3085 reg_class_to_reg_type
[(int)LINK_OR_CTR_REGS
] = SPR_REG_TYPE
;
3086 reg_class_to_reg_type
[(int)CR_REGS
] = CR_REG_TYPE
;
3087 reg_class_to_reg_type
[(int)CR0_REGS
] = CR_REG_TYPE
;
3091 reg_class_to_reg_type
[(int)FLOAT_REGS
] = VSX_REG_TYPE
;
3092 reg_class_to_reg_type
[(int)ALTIVEC_REGS
] = VSX_REG_TYPE
;
3096 reg_class_to_reg_type
[(int)FLOAT_REGS
] = FPR_REG_TYPE
;
3097 reg_class_to_reg_type
[(int)ALTIVEC_REGS
] = ALTIVEC_REG_TYPE
;
3100 /* Precalculate the valid memory formats as well as the vector information,
3101 this must be set up before the rs6000_hard_regno_nregs_internal calls
3103 gcc_assert ((int)VECTOR_NONE
== 0);
3104 memset ((void *) &rs6000_vector_unit
[0], '\0', sizeof (rs6000_vector_unit
));
3105 memset ((void *) &rs6000_vector_mem
[0], '\0', sizeof (rs6000_vector_unit
));
3107 gcc_assert ((int)CODE_FOR_nothing
== 0);
3108 memset ((void *) ®_addr
[0], '\0', sizeof (reg_addr
));
3110 gcc_assert ((int)NO_REGS
== 0);
3111 memset ((void *) &rs6000_constraints
[0], '\0', sizeof (rs6000_constraints
));
3113 /* The VSX hardware allows native alignment for vectors, but control whether the compiler
3114 believes it can use native alignment or still uses 128-bit alignment. */
3115 if (TARGET_VSX
&& !TARGET_VSX_ALIGN_128
)
3126 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3127 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3128 if (TARGET_FLOAT128_TYPE
)
3130 rs6000_vector_mem
[KFmode
] = VECTOR_VSX
;
3131 rs6000_vector_align
[KFmode
] = 128;
3133 if (FLOAT128_IEEE_P (TFmode
))
3135 rs6000_vector_mem
[TFmode
] = VECTOR_VSX
;
3136 rs6000_vector_align
[TFmode
] = 128;
3140 /* V2DF mode, VSX only. */
3143 rs6000_vector_unit
[V2DFmode
] = VECTOR_VSX
;
3144 rs6000_vector_mem
[V2DFmode
] = VECTOR_VSX
;
3145 rs6000_vector_align
[V2DFmode
] = align64
;
3148 /* V4SF mode, either VSX or Altivec. */
3151 rs6000_vector_unit
[V4SFmode
] = VECTOR_VSX
;
3152 rs6000_vector_mem
[V4SFmode
] = VECTOR_VSX
;
3153 rs6000_vector_align
[V4SFmode
] = align32
;
3155 else if (TARGET_ALTIVEC
)
3157 rs6000_vector_unit
[V4SFmode
] = VECTOR_ALTIVEC
;
3158 rs6000_vector_mem
[V4SFmode
] = VECTOR_ALTIVEC
;
3159 rs6000_vector_align
[V4SFmode
] = align32
;
3162 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3166 rs6000_vector_unit
[V4SImode
] = VECTOR_ALTIVEC
;
3167 rs6000_vector_unit
[V8HImode
] = VECTOR_ALTIVEC
;
3168 rs6000_vector_unit
[V16QImode
] = VECTOR_ALTIVEC
;
3169 rs6000_vector_align
[V4SImode
] = align32
;
3170 rs6000_vector_align
[V8HImode
] = align32
;
3171 rs6000_vector_align
[V16QImode
] = align32
;
3175 rs6000_vector_mem
[V4SImode
] = VECTOR_VSX
;
3176 rs6000_vector_mem
[V8HImode
] = VECTOR_VSX
;
3177 rs6000_vector_mem
[V16QImode
] = VECTOR_VSX
;
3181 rs6000_vector_mem
[V4SImode
] = VECTOR_ALTIVEC
;
3182 rs6000_vector_mem
[V8HImode
] = VECTOR_ALTIVEC
;
3183 rs6000_vector_mem
[V16QImode
] = VECTOR_ALTIVEC
;
3187 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3188 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3191 rs6000_vector_mem
[V2DImode
] = VECTOR_VSX
;
3192 rs6000_vector_unit
[V2DImode
]
3193 = (TARGET_P8_VECTOR
) ? VECTOR_P8_VECTOR
: VECTOR_NONE
;
3194 rs6000_vector_align
[V2DImode
] = align64
;
3196 rs6000_vector_mem
[V1TImode
] = VECTOR_VSX
;
3197 rs6000_vector_unit
[V1TImode
]
3198 = (TARGET_P8_VECTOR
) ? VECTOR_P8_VECTOR
: VECTOR_NONE
;
3199 rs6000_vector_align
[V1TImode
] = 128;
3202 /* DFmode, see if we want to use the VSX unit. Memory is handled
3203 differently, so don't set rs6000_vector_mem. */
3206 rs6000_vector_unit
[DFmode
] = VECTOR_VSX
;
3207 rs6000_vector_align
[DFmode
] = 64;
3210 /* SFmode, see if we want to use the VSX unit. */
3211 if (TARGET_P8_VECTOR
)
3213 rs6000_vector_unit
[SFmode
] = VECTOR_VSX
;
3214 rs6000_vector_align
[SFmode
] = 32;
3217 /* Allow TImode in VSX register and set the VSX memory macros. */
3220 rs6000_vector_mem
[TImode
] = VECTOR_VSX
;
3221 rs6000_vector_align
[TImode
] = align64
;
3224 /* TODO add paired floating point vector support. */
3226 /* Register class constraints for the constraints that depend on compile
3227 switches. When the VSX code was added, different constraints were added
3228 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3229 of the VSX registers are used. The register classes for scalar floating
3230 point types is set, based on whether we allow that type into the upper
3231 (Altivec) registers. GCC has register classes to target the Altivec
3232 registers for load/store operations, to select using a VSX memory
3233 operation instead of the traditional floating point operation. The
3236 d - Register class to use with traditional DFmode instructions.
3237 f - Register class to use with traditional SFmode instructions.
3238 v - Altivec register.
3239 wa - Any VSX register.
3240 wc - Reserved to represent individual CR bits (used in LLVM).
3241 wd - Preferred register class for V2DFmode.
3242 wf - Preferred register class for V4SFmode.
3243 wg - Float register for power6x move insns.
3244 wh - FP register for direct move instructions.
3245 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3246 wj - FP or VSX register to hold 64-bit integers for direct moves.
3247 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3248 wl - Float register if we can do 32-bit signed int loads.
3249 wm - VSX register for ISA 2.07 direct move operations.
3250 wn - always NO_REGS.
3251 wr - GPR if 64-bit mode is permitted.
3252 ws - Register class to do ISA 2.06 DF operations.
3253 wt - VSX register for TImode in VSX registers.
3254 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3255 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3256 ww - Register class to do SF conversions in with VSX operations.
3257 wx - Float register if we can do 32-bit int stores.
3258 wy - Register class to do ISA 2.07 SF operations.
3259 wz - Float register if we can do 32-bit unsigned int loads.
3260 wH - Altivec register if SImode is allowed in VSX registers.
3261 wI - VSX register if SImode is allowed in VSX registers.
3262 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3263 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3265 if (TARGET_HARD_FLOAT
)
3266 rs6000_constraints
[RS6000_CONSTRAINT_f
] = FLOAT_REGS
; /* SFmode */
3268 if (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
3269 rs6000_constraints
[RS6000_CONSTRAINT_d
] = FLOAT_REGS
; /* DFmode */
3273 rs6000_constraints
[RS6000_CONSTRAINT_wa
] = VSX_REGS
;
3274 rs6000_constraints
[RS6000_CONSTRAINT_wd
] = VSX_REGS
; /* V2DFmode */
3275 rs6000_constraints
[RS6000_CONSTRAINT_wf
] = VSX_REGS
; /* V4SFmode */
3276 rs6000_constraints
[RS6000_CONSTRAINT_ws
] = VSX_REGS
; /* DFmode */
3277 rs6000_constraints
[RS6000_CONSTRAINT_wv
] = ALTIVEC_REGS
; /* DFmode */
3278 rs6000_constraints
[RS6000_CONSTRAINT_wi
] = VSX_REGS
; /* DImode */
3279 rs6000_constraints
[RS6000_CONSTRAINT_wt
] = VSX_REGS
; /* TImode */
3282 /* Add conditional constraints based on various options, to allow us to
3283 collapse multiple insn patterns. */
3285 rs6000_constraints
[RS6000_CONSTRAINT_v
] = ALTIVEC_REGS
;
3287 if (TARGET_MFPGPR
) /* DFmode */
3288 rs6000_constraints
[RS6000_CONSTRAINT_wg
] = FLOAT_REGS
;
3291 rs6000_constraints
[RS6000_CONSTRAINT_wl
] = FLOAT_REGS
; /* DImode */
3293 if (TARGET_DIRECT_MOVE
)
3295 rs6000_constraints
[RS6000_CONSTRAINT_wh
] = FLOAT_REGS
;
3296 rs6000_constraints
[RS6000_CONSTRAINT_wj
] /* DImode */
3297 = rs6000_constraints
[RS6000_CONSTRAINT_wi
];
3298 rs6000_constraints
[RS6000_CONSTRAINT_wk
] /* DFmode */
3299 = rs6000_constraints
[RS6000_CONSTRAINT_ws
];
3300 rs6000_constraints
[RS6000_CONSTRAINT_wm
] = VSX_REGS
;
3303 if (TARGET_POWERPC64
)
3305 rs6000_constraints
[RS6000_CONSTRAINT_wr
] = GENERAL_REGS
;
3306 rs6000_constraints
[RS6000_CONSTRAINT_wA
] = BASE_REGS
;
3309 if (TARGET_P8_VECTOR
) /* SFmode */
3311 rs6000_constraints
[RS6000_CONSTRAINT_wu
] = ALTIVEC_REGS
;
3312 rs6000_constraints
[RS6000_CONSTRAINT_wy
] = VSX_REGS
;
3313 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = VSX_REGS
;
3315 else if (TARGET_VSX
)
3316 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = FLOAT_REGS
;
3319 rs6000_constraints
[RS6000_CONSTRAINT_wx
] = FLOAT_REGS
; /* DImode */
3322 rs6000_constraints
[RS6000_CONSTRAINT_wz
] = FLOAT_REGS
; /* DImode */
3324 if (TARGET_FLOAT128_TYPE
)
3326 rs6000_constraints
[RS6000_CONSTRAINT_wq
] = VSX_REGS
; /* KFmode */
3327 if (FLOAT128_IEEE_P (TFmode
))
3328 rs6000_constraints
[RS6000_CONSTRAINT_wp
] = VSX_REGS
; /* TFmode */
3331 if (TARGET_P9_VECTOR
)
3333 /* Support for new D-form instructions. */
3334 rs6000_constraints
[RS6000_CONSTRAINT_wb
] = ALTIVEC_REGS
;
3336 /* Support for ISA 3.0 (power9) vectors. */
3337 rs6000_constraints
[RS6000_CONSTRAINT_wo
] = VSX_REGS
;
3340 /* Support for new direct moves (ISA 3.0 + 64bit). */
3341 if (TARGET_DIRECT_MOVE_128
)
3342 rs6000_constraints
[RS6000_CONSTRAINT_we
] = VSX_REGS
;
3344 /* Support small integers in VSX registers. */
3345 if (TARGET_P8_VECTOR
)
3347 rs6000_constraints
[RS6000_CONSTRAINT_wH
] = ALTIVEC_REGS
;
3348 rs6000_constraints
[RS6000_CONSTRAINT_wI
] = FLOAT_REGS
;
3349 if (TARGET_P9_VECTOR
)
3351 rs6000_constraints
[RS6000_CONSTRAINT_wJ
] = FLOAT_REGS
;
3352 rs6000_constraints
[RS6000_CONSTRAINT_wK
] = ALTIVEC_REGS
;
3356 /* Set up the reload helper and direct move functions. */
3357 if (TARGET_VSX
|| TARGET_ALTIVEC
)
3361 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_di_store
;
3362 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_di_load
;
3363 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_di_store
;
3364 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_di_load
;
3365 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_di_store
;
3366 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_di_load
;
3367 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_di_store
;
3368 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_di_load
;
3369 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_di_store
;
3370 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_di_load
;
3371 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_di_store
;
3372 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_di_load
;
3373 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_di_store
;
3374 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_di_load
;
3375 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_di_store
;
3376 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_di_load
;
3377 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_di_store
;
3378 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_di_load
;
3379 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_di_store
;
3380 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_di_load
;
3382 if (FLOAT128_VECTOR_P (KFmode
))
3384 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_di_store
;
3385 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_di_load
;
3388 if (FLOAT128_VECTOR_P (TFmode
))
3390 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_di_store
;
3391 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_di_load
;
3394 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3396 if (TARGET_NO_SDMODE_STACK
)
3398 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_di_store
;
3399 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_di_load
;
3404 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_di_store
;
3405 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_di_load
;
3408 if (TARGET_DIRECT_MOVE
&& !TARGET_DIRECT_MOVE_128
)
3410 reg_addr
[TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxti
;
3411 reg_addr
[V1TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv1ti
;
3412 reg_addr
[V2DFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2df
;
3413 reg_addr
[V2DImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2di
;
3414 reg_addr
[V4SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4sf
;
3415 reg_addr
[V4SImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4si
;
3416 reg_addr
[V8HImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv8hi
;
3417 reg_addr
[V16QImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv16qi
;
3418 reg_addr
[SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxsf
;
3420 reg_addr
[TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprti
;
3421 reg_addr
[V1TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv1ti
;
3422 reg_addr
[V2DFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2df
;
3423 reg_addr
[V2DImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2di
;
3424 reg_addr
[V4SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4sf
;
3425 reg_addr
[V4SImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4si
;
3426 reg_addr
[V8HImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv8hi
;
3427 reg_addr
[V16QImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv16qi
;
3428 reg_addr
[SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprsf
;
3430 if (FLOAT128_VECTOR_P (KFmode
))
3432 reg_addr
[KFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxkf
;
3433 reg_addr
[KFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprkf
;
3436 if (FLOAT128_VECTOR_P (TFmode
))
3438 reg_addr
[TFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxtf
;
3439 reg_addr
[TFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprtf
;
3445 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_si_store
;
3446 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_si_load
;
3447 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_si_store
;
3448 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_si_load
;
3449 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_si_store
;
3450 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_si_load
;
3451 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_si_store
;
3452 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_si_load
;
3453 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_si_store
;
3454 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_si_load
;
3455 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_si_store
;
3456 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_si_load
;
3457 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_si_store
;
3458 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_si_load
;
3459 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_si_store
;
3460 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_si_load
;
3461 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_si_store
;
3462 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_si_load
;
3463 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_si_store
;
3464 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_si_load
;
3466 if (FLOAT128_VECTOR_P (KFmode
))
3468 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_si_store
;
3469 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_si_load
;
3472 if (FLOAT128_IEEE_P (TFmode
))
3474 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_si_store
;
3475 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_si_load
;
3478 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3480 if (TARGET_NO_SDMODE_STACK
)
3482 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_si_store
;
3483 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_si_load
;
3488 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_si_store
;
3489 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_si_load
;
3492 if (TARGET_DIRECT_MOVE
)
3494 reg_addr
[DImode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdi
;
3495 reg_addr
[DDmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdd
;
3496 reg_addr
[DFmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdf
;
3500 reg_addr
[DFmode
].scalar_in_vmx_p
= true;
3501 reg_addr
[DImode
].scalar_in_vmx_p
= true;
3503 if (TARGET_P8_VECTOR
)
3505 reg_addr
[SFmode
].scalar_in_vmx_p
= true;
3506 reg_addr
[SImode
].scalar_in_vmx_p
= true;
3508 if (TARGET_P9_VECTOR
)
3510 reg_addr
[HImode
].scalar_in_vmx_p
= true;
3511 reg_addr
[QImode
].scalar_in_vmx_p
= true;
3516 /* Setup the fusion operations. */
3517 if (TARGET_P8_FUSION
)
3519 reg_addr
[QImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_qi
;
3520 reg_addr
[HImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_hi
;
3521 reg_addr
[SImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_si
;
3523 reg_addr
[DImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_di
;
  if (TARGET_P9_FUSION)
    {
      struct fuse_insns {
        enum machine_mode mode;                 /* mode of the fused type.  */
        enum machine_mode pmode;                /* pointer mode.  */
        enum rs6000_reload_reg_type rtype;      /* register type.  */
        enum insn_code load;                    /* load insn.  */
        enum insn_code store;                   /* store insn.  */
      };

      static const struct fuse_insns addis_insns[] = {
        { E_SFmode, E_DImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_di_sf_load,
          CODE_FOR_fusion_vsx_di_sf_store },

        { E_SFmode, E_SImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_si_sf_load,
          CODE_FOR_fusion_vsx_si_sf_store },

        { E_DFmode, E_DImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_di_df_load,
          CODE_FOR_fusion_vsx_di_df_store },

        { E_DFmode, E_SImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_si_df_load,
          CODE_FOR_fusion_vsx_si_df_store },

        { E_DImode, E_DImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_di_di_load,
          CODE_FOR_fusion_vsx_di_di_store },

        { E_DImode, E_SImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_si_di_load,
          CODE_FOR_fusion_vsx_si_di_store },

        { E_QImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_qi_load,
          CODE_FOR_fusion_gpr_di_qi_store },

        { E_QImode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_qi_load,
          CODE_FOR_fusion_gpr_si_qi_store },

        { E_HImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_hi_load,
          CODE_FOR_fusion_gpr_di_hi_store },

        { E_HImode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_hi_load,
          CODE_FOR_fusion_gpr_si_hi_store },

        { E_SImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_si_load,
          CODE_FOR_fusion_gpr_di_si_store },

        { E_SImode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_si_load,
          CODE_FOR_fusion_gpr_si_si_store },

        { E_SFmode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_sf_load,
          CODE_FOR_fusion_gpr_di_sf_store },

        { E_SFmode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_sf_load,
          CODE_FOR_fusion_gpr_si_sf_store },

        { E_DImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_di_load,
          CODE_FOR_fusion_gpr_di_di_store },

        { E_DFmode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_df_load,
          CODE_FOR_fusion_gpr_di_df_store },
      };

      machine_mode cur_pmode = Pmode;

      for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
        {
          machine_mode xmode = addis_insns[i].mode;
          enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;

          if (addis_insns[i].pmode != cur_pmode)
            continue;

          if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
            continue;

          reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
          reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;

          if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
            {
              reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
                = addis_insns[i].load;
              reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
                = addis_insns[i].store;
            }
        }
    }
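  /* Illustrative note (not part of the original file): the tables above pick
     insn patterns for "addis fusion", where an addis that builds the high
     part of an address and the dependent memory access that consumes it can
     issue as one fused operation on fusing cores.  The assembly pair below is
     a hypothetical example of that pattern; the label and register numbers
     are made up for illustration only:

	addis 9,2,.LC0@toc@ha	# build the high part of the address
	lfd   1,.LC0@toc@l(9)	# dependent load completes the access  */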
  /* Note which types we support fusing TOC setup plus memory insn.  We only do
     fused TOCs for medium/large code models.  */
  if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL))
    {
      reg_addr[QImode].fused_toc = true;
      reg_addr[HImode].fused_toc = true;
      reg_addr[SImode].fused_toc = true;
      reg_addr[DImode].fused_toc = true;

      if (TARGET_HARD_FLOAT)
        {
          if (TARGET_SINGLE_FLOAT)
            reg_addr[SFmode].fused_toc = true;
          if (TARGET_DOUBLE_FLOAT)
            reg_addr[DFmode].fused_toc = true;
        }
    }
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
        = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);

  /* Precalculate TARGET_HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m))
        rs6000_hard_regno_mode_ok_p[m][r] = true;
  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      if (TARGET_VSX && VSX_REG_CLASS_P (c))
        reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
        reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
        reg_size = UNITS_PER_FP_WORD;

      else
        reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
        {
          machine_mode m2 = (machine_mode) m;
          int reg_size2 = reg_size;

          /* TDmode and IBM 128-bit floating point always take 2 registers,
             even in VSX registers.  */
          if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
            reg_size2 = UNITS_PER_FP_WORD;

          rs6000_class_max_nregs[m][c]
            = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
        }
    }
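  /* Illustrative sketch (not part of the original file): the ceiling division
     above with concrete numbers.  A 16-byte V2DFmode value in a class with
     8-byte registers needs (16 + 8 - 1) / 8 = 2 registers, and a 10-byte
     value would likewise round up to 2.  */
#if 0
static int
example_class_nregs (int mode_size, int reg_size)
{
  /* Same rounding-up idiom used for rs6000_class_max_nregs above.  */
  return (mode_size + reg_size - 1) / reg_size;
}
#endif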
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double-precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));

  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
        warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
                 "-ffast-math");
      if (flag_trapping_math)
        warning (0, "%qs requires %qs or %qs", "-mrecip",
                 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
        warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
                 "-ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
        {
          if (RS6000_RECIP_HAVE_RE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_DIV) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_DIV) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
        }
    }
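  /* Illustrative sketch (not part of the original file): the kind of code the
     AUTO_RE bits permit.  A hardware reciprocal estimate (stood in for here by
     1.0f / b) is refined with one Newton-Raphson step so that a divide can be
     replaced by multiplies when -mrecip and the -ffast-math style flags
     checked above are in effect.  */
#if 0
static float
example_recip_division (float a, float b)
{
  float est = 1.0f / b;			/* stand-in for the estimate insn */
  est = est * (2.0f - b * est);		/* one Newton-Raphson refinement */
  return a * est;			/* a / b computed via multiplies */
}
#endif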
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
        rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
        fprintf (stderr,
                 "SImode variable mult cost       = %d\n"
                 "SImode constant mult cost       = %d\n"
                 "SImode short constant mult cost = %d\n"
                 "DImode multiplication cost      = %d\n"
                 "SImode division cost            = %d\n"
                 "DImode division cost            = %d\n"
                 "Simple fp operation cost        = %d\n"
                 "DFmode multiplication cost      = %d\n"
                 "SFmode division cost            = %d\n"
                 "DFmode division cost            = %d\n"
                 "cache line size                 = %d\n"
                 "l1 cache size                   = %d\n"
                 "l2 cache size                   = %d\n"
                 "simultaneous prefetches         = %d\n",
                 rs6000_cost->mulsi,
                 rs6000_cost->mulsi_const,
                 rs6000_cost->mulsi_const9,
                 rs6000_cost->muldi,
                 rs6000_cost->divsi,
                 rs6000_cost->divdi,
                 rs6000_cost->fp,
                 rs6000_cost->dmul,
                 rs6000_cost->sdiv,
                 rs6000_cost->ddiv,
                 rs6000_cost->cache_line_size,
                 rs6000_cost->l1_cache_size,
                 rs6000_cost->l2_cache_size,
                 rs6000_cost->simultaneous_prefetches);
    }
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */

static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif

/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like PAIRED are no longer in target_flags.  */
HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)                 ? RS6000_BTM_ALTIVEC    : 0)
          | ((TARGET_CMPB)                  ? RS6000_BTM_CMPB       : 0)
          | ((TARGET_VSX)                   ? RS6000_BTM_VSX        : 0)
          | ((TARGET_PAIRED_FLOAT)          ? RS6000_BTM_PAIRED     : 0)
          | ((TARGET_FRE)                   ? RS6000_BTM_FRE        : 0)
          | ((TARGET_FRES)                  ? RS6000_BTM_FRES       : 0)
          | ((TARGET_FRSQRTE)               ? RS6000_BTM_FRSQRTE    : 0)
          | ((TARGET_FRSQRTES)              ? RS6000_BTM_FRSQRTES   : 0)
          | ((TARGET_POPCNTD)               ? RS6000_BTM_POPCNTD    : 0)
          | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL       : 0)
          | ((TARGET_P8_VECTOR)             ? RS6000_BTM_P8_VECTOR  : 0)
          | ((TARGET_P9_VECTOR)             ? RS6000_BTM_P9_VECTOR  : 0)
          | ((TARGET_P9_MISC)               ? RS6000_BTM_P9_MISC    : 0)
          | ((TARGET_MODULO)                ? RS6000_BTM_MODULO     : 0)
          | ((TARGET_64BIT)                 ? RS6000_BTM_64BIT      : 0)
          | ((TARGET_CRYPTO)                ? RS6000_BTM_CRYPTO     : 0)
          | ((TARGET_HTM)                   ? RS6000_BTM_HTM        : 0)
          | ((TARGET_DFP)                   ? RS6000_BTM_DFP        : 0)
          | ((TARGET_HARD_FLOAT)            ? RS6000_BTM_HARD_FLOAT : 0)
          | ((TARGET_LONG_DOUBLE_128)       ? RS6000_BTM_LDBL128    : 0)
          | ((TARGET_FLOAT128_TYPE)         ? RS6000_BTM_FLOAT128   : 0)
          | ((TARGET_FLOAT128_HW)           ? RS6000_BTM_FLOAT128_HW : 0));
}
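/* Illustrative sketch (not part of the original file): how a mask computed by
   rs6000_builtin_mask_calculate is typically consulted.  The function name and
   the particular bit combination below are hypothetical; only the bitwise test
   mirrors the intended use of the RS6000_BTM_* bits.  */
#if 0
static bool
example_builtin_enabled_p (HOST_WIDE_INT builtin_mask)
{
  /* A builtin that needs both VSX and 64-bit support is available only when
     every required bit is present in the mask.  */
  HOST_WIDE_INT required = RS6000_BTM_VSX | RS6000_BTM_64BIT;
  return (builtin_mask & required) == required;
}
#endif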
/* Implement TARGET_MD_ASM_ADJUST.  All asm statements are considered
   to clobber the XER[CA] bit because clobbering that bit without telling
   the compiler worked just fine with versions of GCC before GCC 5, and
   breaking a lot of older code in ways that are hard to track down is
   not such a great idea.  */

static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
                      vec<const char *> &/*constraints*/,
                      vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
  SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
  return NULL;
}
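/* Illustrative sketch (hypothetical user code, not part of GCC): an asm
   statement such as the one below modifies XER[CA] through "addic"/"addze"
   without declaring it, which is why the hook above adds an implicit CA
   clobber to every asm.  */
#if 0
static long
example_asm_that_touches_ca (long a, long b)
{
  long sum;
  __asm__ ("addic %0,%1,1\n\taddze %0,%2"
           : "=&r" (sum) : "r" (a), "r" (b));	/* sets and then reads CA */
  return sum;
}
#endif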
/* Override command line options.

   Combine build-specific configuration information with options
   specified on the command line to set various state variables which
   influence code generation, optimization, and expansion of built-in
   functions.  Assure that command-line configuration preferences are
   compatible with each other and with the build configuration; issue
   warnings while adjusting configuration or error messages while
   rejecting configuration.

   Upon entry to this function:

     This function is called once at the beginning of
     compilation, and then again at the start and end of compiling
     each section of code that has a different configuration, as
     indicated, for example, by adding the

       __attribute__((__target__("cpu=power9")))

     qualifier to a function definition or, for example, by bracketing
     code between

       #pragma GCC target("altivec")

     and

       #pragma GCC reset_options

     directives.  Parameter global_init_p is true for the initial
     invocation, which initializes global variables, and false for all
     subsequent invocations.

     Various global state information is assumed to be valid.  This
     includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
     default CPU specified at build configure time, TARGET_DEFAULT,
     representing the default set of option flags for the default
     target, and global_options_set.x_rs6000_isa_flags, representing
     which options were requested on the command line.

   Upon return from this function:

     rs6000_isa_flags_explicit has a non-zero bit for each flag that
     was set by name on the command line.  Additionally, if certain
     attributes are automatically enabled or disabled by this function
     in order to assure compatibility between options and
     configuration, the flags associated with those attributes are
     also set.  By setting these "explicit bits", we avoid the risk
     that other code might accidentally overwrite these particular
     attributes with "default values".

     The various bits of rs6000_isa_flags are set to indicate the
     target options that have been selected for the most current
     compilation efforts.  This has the effect of also turning on the
     associated TARGET_XXX values since these are macros which are
     generally defined to test the corresponding bit of the
     rs6000_isa_flags variable.

     The variable rs6000_builtin_mask is set to represent the target
     options for the most current compilation efforts, consistent with
     the current contents of rs6000_isa_flags.  This variable controls
     expansion of built-in functions.

     Various other global variables and fields of global structures
     (over 50 in all) are initialized to reflect the desired options
     for the most current compilation efforts.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;

  HOST_WIDE_INT set_masks;
  HOST_WIDE_INT ignore_masks;
  int cpu_index = -1;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Print defaults.  */
  if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
    rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  /* Remember the explicit arguments.  */
  if (global_init_p)
    rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "%qs is not supported for 64-bit Darwin;"
             " it is incompatible with the installed C and C++ libraries",
             "-malign-power");

  /* Numerous experiments show that IRA based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;

  /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
     for tracebacks to be complete but not if any -fasynchronous-unwind-tables
     options were already specified.  */
  if (flag_sanitize & SANITIZE_USER_ADDRESS
      && !global_options_set.x_flag_asynchronous_unwind_tables)
    flag_asynchronous_unwind_tables = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = SImode;
      rs6000_pointer_size = 32;
    }
  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
                   | OTHER_VSX_VECTOR_MASKS);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    cpu_index = rs6000_cpu_index;
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    cpu_index = main_target_opt->x_rs6000_cpu_index;
  else if (OPTION_TARGET_CPU_DEFAULT)
    cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);

  if (cpu_index >= 0)
    {
      const char *unavailable_cpu = NULL;
      switch (processor_target_table[cpu_index].processor)
        {
#ifndef HAVE_AS_POWER9
        case PROCESSOR_POWER9:
          unavailable_cpu = "power9";
          break;
#endif
#ifndef HAVE_AS_POWER8
        case PROCESSOR_POWER8:
          unavailable_cpu = "power8";
          break;
#endif
#ifndef HAVE_AS_POPCNTD
        case PROCESSOR_POWER7:
          unavailable_cpu = "power7";
          break;
#endif
#ifndef HAVE_AS_DFP
        case PROCESSOR_POWER6:
          unavailable_cpu = "power6";
          break;
#endif
#ifndef HAVE_AS_POPCNTB
        case PROCESSOR_POWER5:
          unavailable_cpu = "power5";
          break;
#endif
        default:
          break;
        }
      if (unavailable_cpu)
        {
          cpu_index = -1;
          warning (0, "will not generate %qs instructions because "
                   "assembler lacks %qs support", unavailable_cpu,
                   unavailable_cpu);
        }
    }
  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index;
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
                           & set_masks);
    }
  else
    {
      /* If no -mcpu=<xxx>, inherit any default options that were cleared via
         POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
         target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we
         switched to using rs6000_isa_flags, we need to do the initialization
         here.

         If there is a TARGET_DEFAULT, use that.  Otherwise fall back to using
         -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults.  */
      HOST_WIDE_INT flags;

      if (TARGET_DEFAULT)
        flags = TARGET_DEFAULT;
      else
        {
          /* PowerPC 64-bit LE requires at least ISA 2.07.  */
          const char *default_cpu = (!TARGET_POWERPC64
                                     ? "powerpc"
                                     : (BYTES_BIG_ENDIAN
                                        ? "powerpc64"
                                        : "powerpc64le"));
          int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
          flags = processor_target_table[default_cpu_index].target_enable;
        }
      rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
    }
  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (cpu_index >= 0)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
        = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
        if (processor_target_table[i].processor == tune_proc)
          {
            tune_index = i;
            break;
          }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
        error ("AltiVec not supported in this target");
    }
  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
                                                      | OPTION_MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
        {
          rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
          if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
            warning (0, "%qs is not supported on little endian systems",
                     "-mmultiple");
        }

      if (TARGET_STRING)
        {
          rs6000_isa_flags &= ~OPTION_MASK_STRING;
          if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
            warning (0, "%qs is not supported on little endian systems",
                     "-mstring");
        }
    }
  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;

  /* -maltivec={le,be} implies -maltivec.  */
  if (rs6000_altivec_element_order != 0)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Disallow -maltivec=le in big endian mode for now.  This is not
     known to be useful for anyone.  */
  if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
    {
      warning (0, N_("-maltivec=le not allowed for big-endian targets"));
      rs6000_altivec_element_order = 0;
    }
  if (!rs6000_fold_gimple)
    fprintf (stderr,
             "gimple folding of rs6000 builtins has been disabled.\n");
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;

      if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
            msg = N_("-mvsx requires hardware floating point");
          else
            {
              rs6000_isa_flags &= ~ OPTION_MASK_VSX;
              rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
            }
        }
      else if (TARGET_PAIRED_FLOAT)
        msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
        msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
                                   & OPTION_MASK_ALTIVEC))
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
            msg = N_("-mvsx and -mno-altivec are incompatible");
          else
            msg = N_("-mno-altivec disables vsx");
        }

      if (msg)
        {
          warning (0, msg);
          rs6000_isa_flags &= ~ OPTION_MASK_VSX;
          rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
        }
    }
  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
                                       | OPTION_MASK_ALTIVEC
                                       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
                           | OPTION_MASK_DIRECT_MOVE)
                          & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
  /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
     off all of the options that depend on those flags.  */
  ignore_masks = rs6000_disable_incompatible_switches ();

  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
    rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);

  else if (TARGET_P9_MINMAX)
    {
      if (cpu_index >= 0)
        {
          if (cpu_index == PROCESSOR_POWER9)
            {
              /* legacy behavior: allow -mcpu=power9 with certain
                 capabilities explicitly disabled.  */
              rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
            }
          else
            error ("power9 target option is incompatible with %<%s=<xxx>%> "
                   "for <xxx> less than power9", "-mcpu");
        }
      else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
               != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
                   & rs6000_isa_flags_explicit))
        {
          /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
             were explicitly cleared.  */
          error ("%qs incompatible with explicitly disabled options",
                 "-mpower9-minmax");
        }
      else
        rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
    }
  else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
        error ("%qs requires %qs", "-mcrypto", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
        error ("%qs requires %qs", "-mdirect-move", "-mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
        error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
          && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
        error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
        {
          rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
          if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
            rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
        }
      else
        {
          /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
             not explicit.  */
          rs6000_isa_flags |= OPTION_MASK_VSX;
          rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
        }
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
        error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
        warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
        warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
                            | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store are disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
        warning (0, N_("-mquad-memory is not available in little endian "
                       "mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume if the user asked for normal quad memory instructions, they want
     the atomic versions as well, unless they explicitly told us not to use
     quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
                         & OPTION_MASK_P8_FUSION);

  /* Setting additional fusion flags turns on base fusion.  */
  if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
        {
          if (TARGET_P8_FUSION_SIGN)
            error ("%qs requires %qs", "-mpower8-fusion-sign",
                   "-mpower8-fusion");

          if (TARGET_TOC_FUSION)
            error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");

          rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
        }
      else
        rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Power9 fusion is a superset over power8 fusion.  */
  if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
        {
          /* We prefer to not mention undocumented options in
             error messages.  However, if users have managed to select
             power9-fusion without selecting power8-fusion, they
             already know about undocumented flags.  */
          error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
          rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
        }
      else
        rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Enable power9 fusion if we are tuning for power9, even if we aren't
     generating power9 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
                         & OPTION_MASK_P9_FUSION);

  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  /* TOC fusion requires 64-bit and medium/large code model.  */
  if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
        warning (0, N_("-mtoc-fusion requires 64-bit"));
    }

  if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
        warning (0, N_("-mtoc-fusion requires medium/large code model"));
    }

  /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
     model are used.  */
  if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL)
      && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
    rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
  /* ISA 3.0 vector instructions include ISA 2.07.  */
  if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
    {
      /* We prefer to not mention undocumented options in
         error messages.  However, if users have managed to select
         power9-vector without selecting power8-vector, they
         already know about undocumented flags.  */
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
          && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
        error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
        {
          rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
          if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
            rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
        }
      else
        {
          /* OPTION_MASK_P9_VECTOR is explicit and
             OPTION_MASK_P8_VECTOR is not explicit.  */
          rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
          rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
        }
    }
  /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
     support.  If we only have ISA 2.06 support, and the user did not specify
     the switch, leave it set to -1 so the movmisalign patterns are enabled,
     but we don't enable the full vectorization support.  */
  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
    TARGET_ALLOW_MOVMISALIGN = 1;

  else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
    {
      if (TARGET_ALLOW_MOVMISALIGN > 0
          && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
        error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");

      TARGET_ALLOW_MOVMISALIGN = 0;
    }

  /* Determine when unaligned vector accesses are permitted, and when
     they are preferred over masked Altivec loads.  Note that if
     TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
     TARGET_EFFICIENT_UNALIGNED_VSX must be as well.  The converse is
     not true.  */
  if (TARGET_EFFICIENT_UNALIGNED_VSX)
    {
      if (!TARGET_VSX)
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
            error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");

          rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
        }

      else if (!TARGET_ALLOW_MOVMISALIGN)
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
            error ("%qs requires %qs", "-munefficient-unaligned-vsx",
                   "-mallow-movmisalign");

          rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
        }
    }
  /* Set long double size before the IEEE 128-bit tests.  */
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
          && (main_target_opt->x_rs6000_long_double_type_size
              != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
        error ("target attribute or pragma changes long double size");
      else
        rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

  /* Set -mabi=ieeelongdouble on some old targets.  Note, AIX and Darwin
     explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
     pick up this default.  */
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Enable the default support for IEEE 128-bit floating point on Linux VSX
     systems.  In GCC 7, we would enable the IEEE 128-bit floating point
     infrastructure (-mfloat128-type) but not enable the actual __float128 type
     unless the user used the explicit -mfloat128.  In GCC 8, we enable both
     the keyword as well as the type.  */
  TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;

  /* IEEE 128-bit floating point requires VSX support.  */
  if (TARGET_FLOAT128_KEYWORD)
    {
      if (!TARGET_VSX)
        {
          if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
            error ("%qs requires VSX support", "-mfloat128");

          TARGET_FLOAT128_TYPE = 0;
          rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
                                | OPTION_MASK_FLOAT128_HW);
        }
      else if (!TARGET_FLOAT128_TYPE)
        {
          TARGET_FLOAT128_TYPE = 1;
          warning (0, "The -mfloat128 option may not be fully supported");
        }
    }

  /* Enable the __float128 keyword under Linux by default.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
  /* If we are supporting the float128 type and full ISA 3.0 support,
     enable -mfloat128-hardware by default.  However, don't enable the
     __float128 keyword if it was explicitly turned off.  64-bit mode is needed
     because sometimes the compiler wants to put things in an integer
     container, and if we don't have __int128 support, it is impossible.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
      && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;

  if (TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
        error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_64BIT)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
        error ("%qs requires %qs", "-mfloat128-hardware", "-m64");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }
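  /* Illustrative sketch (hypothetical user code, not part of GCC): what the
     __float128 support configured above exposes.  With -mfloat128 the keyword
     names the IEEE 128-bit type, and with -mfloat128-hardware an operation
     like the one below can map to ISA 3.0 quad-precision instructions
     (e.g. xsaddqp) instead of libgcc software routines.  */
#if 0
__float128
example_float128_add (__float128 x, __float128 y)
{
  return x + y;
}
#endif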
  /* Print the options after updating the defaults.  */
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
          || rs6000_cpu == PROCESSOR_PPCE500MC64
          || rs6000_cpu == PROCESSOR_PPCE5500
          || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
        = rs6000_debug_secondary_reload_class;
      targetm.secondary_memory_needed
        = rs6000_debug_secondary_memory_needed;
      targetm.can_change_mode_class
        = rs6000_debug_can_change_mode_class;
      rs6000_preferred_reload_class_ptr
        = rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
        = rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
        = rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
        rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
        {
          error ("unknown vectorization library ABI type (%qs) for "
                 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
          ret = false;
        }
    }
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    {
      TARGET_FLOAT128_TYPE = 0;
      rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
                             | OPTION_MASK_FLOAT128_KEYWORD)
                            & ~rs6000_isa_flags_explicit);
    }

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
        error ("target attribute or pragma changes AltiVec ABI");
      else
        rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
          && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
        {
          if (main_target_opt != NULL
              && !main_target_opt->x_rs6000_altivec_abi)
            error ("target attribute or pragma changes AltiVec ABI");
          else
            rs6000_altivec_abi = 1;
        }
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
        error ("target attribute or pragma changes darwin64 ABI");
      else
        {
          rs6000_darwin64_abi = 1;
          /* Default to natural alignment, for better performance.  */
          rs6000_alignment_flags = MASK_ALIGN_NATURAL;
        }
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors are enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:
      rs6000_single_float = 0;
      rs6000_double_float = 0;
      rs6000_isa_flags &= ~OPTION_MASK_STRING;
      break;

    default:
      if (cpu_index >= 0 && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
        rs6000_isa_flags &= ~OPTION_MASK_ISEL;
      break;
    }
  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
        error ("target attribute or pragma changes single precision floating "
               "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
        error ("target attribute or pragma changes double precision floating "
               "point");
    }
= (rs6000_cpu
!= PROCESSOR_POWER4
4808 && rs6000_cpu
!= PROCESSOR_POWER5
4809 && rs6000_cpu
!= PROCESSOR_POWER6
4810 && rs6000_cpu
!= PROCESSOR_POWER7
4811 && rs6000_cpu
!= PROCESSOR_POWER8
4812 && rs6000_cpu
!= PROCESSOR_POWER9
4813 && rs6000_cpu
!= PROCESSOR_PPCA2
4814 && rs6000_cpu
!= PROCESSOR_CELL
4815 && rs6000_cpu
!= PROCESSOR_PPC476
);
4816 rs6000_sched_groups
= (rs6000_cpu
== PROCESSOR_POWER4
4817 || rs6000_cpu
== PROCESSOR_POWER5
4818 || rs6000_cpu
== PROCESSOR_POWER7
4819 || rs6000_cpu
== PROCESSOR_POWER8
);
4820 rs6000_align_branch_targets
= (rs6000_cpu
== PROCESSOR_POWER4
4821 || rs6000_cpu
== PROCESSOR_POWER5
4822 || rs6000_cpu
== PROCESSOR_POWER6
4823 || rs6000_cpu
== PROCESSOR_POWER7
4824 || rs6000_cpu
== PROCESSOR_POWER8
4825 || rs6000_cpu
== PROCESSOR_POWER9
4826 || rs6000_cpu
== PROCESSOR_PPCE500MC
4827 || rs6000_cpu
== PROCESSOR_PPCE500MC64
4828 || rs6000_cpu
== PROCESSOR_PPCE5500
4829 || rs6000_cpu
== PROCESSOR_PPCE6500
);
4831 /* Allow debug switches to override the above settings. These are set to -1
4832 in rs6000.opt to indicate the user hasn't directly set the switch. */
4833 if (TARGET_ALWAYS_HINT
>= 0)
4834 rs6000_always_hint
= TARGET_ALWAYS_HINT
;
4836 if (TARGET_SCHED_GROUPS
>= 0)
4837 rs6000_sched_groups
= TARGET_SCHED_GROUPS
;
4839 if (TARGET_ALIGN_BRANCH_TARGETS
>= 0)
4840 rs6000_align_branch_targets
= TARGET_ALIGN_BRANCH_TARGETS
;
4842 rs6000_sched_restricted_insns_priority
4843 = (rs6000_sched_groups
? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
        rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
        rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
        rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
        rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
        rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
                                   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
        rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
        rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
        rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
        rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
                                    atoi (rs6000_sched_insert_nops_str));
    }
  /* Handle stack protector.  */
  if (!global_options_set.x_rs6000_stack_protector_guard)
#ifdef TARGET_THREAD_SSP_OFFSET
    rs6000_stack_protector_guard = SSP_TLS;
#else
    rs6000_stack_protector_guard = SSP_GLOBAL;
#endif

#ifdef TARGET_THREAD_SSP_OFFSET
  rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
  rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
#endif

  if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = rs6000_stack_protector_guard_offset_str;

      errno = 0;
      long offset = strtol (str, &endp, 0);
      if (!*str || *endp || errno)
        error ("%qs is not a valid number in %qs", str,
               "-mstack-protector-guard-offset=");

      if (!IN_RANGE (offset, -0x8000, 0x7fff)
          || (TARGET_64BIT && (offset & 3)))
        error ("%qs is not a valid offset in %qs", str,
               "-mstack-protector-guard-offset=");

      rs6000_stack_protector_guard_offset = offset;
    }

  if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
    {
      const char *str = rs6000_stack_protector_guard_reg_str;
      int reg = decode_reg_name (str);

      if (!IN_RANGE (reg, 1, 31))
        error ("%qs is not a valid base register in %qs", str,
               "-mstack-protector-guard-reg=");

      rs6000_stack_protector_guard_reg = reg;
    }

  if (rs6000_stack_protector_guard == SSP_TLS
      && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
    error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
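  /* Illustrative sketch (hypothetical, not part of GCC): with the TLS guard
     scheme selected above, the canary lives at a fixed offset from the thread
     pointer (r13 for 64-bit, r2 for 32-bit).  The 0x28 offset below is made up
     for illustration; the real value comes from TARGET_THREAD_SSP_OFFSET or
     -mstack-protector-guard-offset=.  */
#if 0
static unsigned long
example_read_tls_canary (void)
{
  register unsigned long *tp __asm__ ("r13");	/* 64-bit thread pointer */
  return tp[0x28 / sizeof (unsigned long)];	/* guard word at offset 0x28 */
}
#endif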
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
         aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
          || rs6000_cpu == PROCESSOR_CELL)
        {
          if (align_functions <= 0)
            align_functions = 8;
          if (align_jumps <= 0)
            align_jumps = 8;
          if (align_loops <= 0)
            align_loops = 8;
        }
      if (rs6000_align_branch_targets)
        {
          if (align_functions <= 0)
            align_functions = 16;
          if (align_jumps <= 0)
            align_jumps = 16;
          if (align_loops <= 0)
            {
              can_override_loop_align = 1;
              align_loops = 16;
            }
        }
      if (align_jumps_max_skip <= 0)
        align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
        align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* The AIX and ELFv1 ABIs define standard function descriptors.  */
  if (DEFAULT_ABI == ABI_AIX)
    targetm.calls.custom_function_descriptors = 0;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
        rs6000_cost = &rs64a_cost;
        break;

      case PROCESSOR_MPCCORE:
        rs6000_cost = &mpccore_cost;
        break;

      case PROCESSOR_PPC403:
        rs6000_cost = &ppc403_cost;
        break;

      case PROCESSOR_PPC405:
        rs6000_cost = &ppc405_cost;
        break;

      case PROCESSOR_PPC440:
        rs6000_cost = &ppc440_cost;
        break;

      case PROCESSOR_PPC476:
        rs6000_cost = &ppc476_cost;
        break;

      case PROCESSOR_PPC601:
        rs6000_cost = &ppc601_cost;
        break;

      case PROCESSOR_PPC603:
        rs6000_cost = &ppc603_cost;
        break;

      case PROCESSOR_PPC604:
        rs6000_cost = &ppc604_cost;
        break;

      case PROCESSOR_PPC604e:
        rs6000_cost = &ppc604e_cost;
        break;

      case PROCESSOR_PPC620:
        rs6000_cost = &ppc620_cost;
        break;

      case PROCESSOR_PPC630:
        rs6000_cost = &ppc630_cost;
        break;

      case PROCESSOR_CELL:
        rs6000_cost = &ppccell_cost;
        break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
        rs6000_cost = &ppc750_cost;
        break;

      case PROCESSOR_PPC7450:
        rs6000_cost = &ppc7450_cost;
        break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
        rs6000_cost = &ppc8540_cost;
        break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
        rs6000_cost = &ppce300c2c3_cost;
        break;

      case PROCESSOR_PPCE500MC:
        rs6000_cost = &ppce500mc_cost;
        break;

      case PROCESSOR_PPCE500MC64:
        rs6000_cost = &ppce500mc64_cost;
        break;

      case PROCESSOR_PPCE5500:
        rs6000_cost = &ppce5500_cost;
        break;

      case PROCESSOR_PPCE6500:
        rs6000_cost = &ppce6500_cost;
        break;

      case PROCESSOR_TITAN:
        rs6000_cost = &titan_cost;
        break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
        rs6000_cost = &power4_cost;
        break;

      case PROCESSOR_POWER6:
        rs6000_cost = &power6_cost;
        break;

      case PROCESSOR_POWER7:
        rs6000_cost = &power7_cost;
        break;

      case PROCESSOR_POWER8:
        rs6000_cost = &power8_cost;
        break;

      case PROCESSOR_POWER9:
        rs6000_cost = &power9_cost;
        break;

      case PROCESSOR_PPCA2:
        rs6000_cost = &ppca2_cost;
        break;

      default:
        gcc_unreachable ();
      }
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         rs6000_cost->simultaneous_prefetches,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         rs6000_cost->cache_line_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* Increase loop peeling limits based on performance analysis.  */
  maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* Use the 'model' -fsched-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
                         SCHED_PRESSURE_MODEL,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  A value of -1 indicates that the
     initial value of this variable has not been overwritten.  During
     compilation, TARGET_AVOID_XFORM is either 0 or 1.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
                          && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (!strcmp (q, "default"))
            mask = ((TARGET_RECIP_PRECISION)
                    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
          else
            {
              for (i = 0; i < ARRAY_SIZE (recip_options); i++)
                if (!strcmp (q, recip_options[i].string))
                  {
                    mask = recip_options[i].mask;
                    break;
                  }

              if (i == ARRAY_SIZE (recip_options))
                {
                  error ("unknown option for %<%s=%s%>", "-mrecip", q);
                  invert = false;
                  mask = 0;
                  ret = false;
                }
            }

          if (invert)
            rs6000_recip_control &= ~mask;
          else
            rs6000_recip_control |= mask;
        }
    }
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like PAIRED are no longer in target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    rs6000_print_builtin_options (stderr, 0, "builtin mask",
                                  rs6000_builtin_mask);

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  /* Don't use lvsl/vperm for P8 and similarly efficient machines.  */
  if ((TARGET_ALTIVEC && !TARGET_VSX)
      || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return
     the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
          || rs6000_cpu == PROCESSOR_POWER5
          || rs6000_cpu == PROCESSOR_POWER6
          || rs6000_cpu == PROCESSOR_POWER7
          || rs6000_cpu == PROCESSOR_POWER8
          || rs6000_cpu == PROCESSOR_POWER9))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach the desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
    return true;

  if (rs6000_alignment_flags == MASK_ALIGN_POWER)
    return true;

  /* Assuming that all other types are naturally aligned.  CHECKME!  */
  return true;
}

/* Return true if the vector misalignment factor is supported by the
   hardware.  */
static bool
rs6000_builtin_support_vector_misalignment (machine_mode mode,
                                            const_tree type,
                                            int misalignment,
                                            bool is_packed)
{
  if (TARGET_VSX)
    {
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
        return true;

      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
        return false;

      if (misalignment == -1)
        {
          /* Misalignment factor is unknown at compile time but we know
             it's word aligned.  */
          if (rs6000_vector_alignment_reachable (type, is_packed))
            {
              int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

              if (element_size == 64 || element_size == 32)
                return true;
            }

          return false;
        }

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
        return true;
    }

  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                                   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
      return 1;

    case vec_perm:
      if (TARGET_VSX)
        return 3;
      else
        return 1;

    case vec_promote_demote:
      if (TARGET_VSX)
        return 4;
      else
        return 1;

    case cond_branch_taken:
      return 3;

    case unaligned_load:
    case vector_gather_load:
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
        return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
        {
          elements = TYPE_VECTOR_SUBPARTS (vectype);
          if (elements == 2)
            /* Double word aligned.  */
            return 2;

          if (elements == 4)
            {
              switch (misalign)
                {
                case 8:
                  /* Double word aligned.  */
                  return 2;

                case -1:
                  /* Unknown misalignment.  */
                case 4:
                case 12:
                  /* Word aligned.  */
                  return 22;

                default:
                  gcc_unreachable ();
                }
            }
        }

      if (TARGET_ALTIVEC)
        /* Misaligned loads are not supported.  */
        gcc_unreachable ();

      return 2;

    case unaligned_store:
    case vector_scatter_store:
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
        return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
        {
          elements = TYPE_VECTOR_SUBPARTS (vectype);
          if (elements == 2)
            /* Double word aligned.  */
            return 2;

          if (elements == 4)
            {
              switch (misalign)
                {
                case 8:
                  /* Double word aligned.  */
                  return 2;

                case -1:
                  /* Unknown misalignment.  */
                case 4:
                case 12:
                  /* Word aligned.  */
                  return 23;

                default:
                  gcc_unreachable ();
                }
            }
        }

      if (TARGET_ALTIVEC)
        /* Misaligned stores are not supported.  */
        gcc_unreachable ();

      return 2;

    case vec_construct:
      /* This is a rough approximation assuming non-constant elements
         constructed into a vector via element insertion.  FIXME:
         vec_construct is not granular enough for uniformly good
         decisions.  If the initialization is a splat, this is
         cheaper than we estimate.  Improve this someday.  */
      elem_type = TREE_TYPE (vectype);
      /* 32-bit vectors loaded into registers are stored as double
         precision, so we need 2 permutes, 2 converts, and 1 merge
         to construct a vector of short floats from them.  */
      if (SCALAR_FLOAT_TYPE_P (elem_type)
          && TYPE_PRECISION (elem_type) == 32)
        return 5;
      /* On POWER9, integer vector types are built up in GPRs and then
         use a direct move (2 cycles).  For POWER8 this is even worse,
         as we need two direct moves and a merge, and the direct moves
         take 5 cycles.  */
      else if (INTEGRAL_TYPE_P (elem_type))
        {
          if (TARGET_P9_VECTOR)
            return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
          else
            return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
        }
      else
        /* V2DFmode doesn't need a direct move.  */
        return 2;

    default:
      gcc_unreachable ();
    }
}
/* Implement targetm.vectorize.preferred_simd_mode.  */
static machine_mode
rs6000_preferred_simd_mode (scalar_mode mode)
{
  if (TARGET_ALTIVEC || TARGET_VSX)

  if (TARGET_PAIRED_FLOAT
typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;
/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            not_vec_cost++;
        }
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "density %d%%, cost %d exceeds threshold, penalizing "
                         "loop body cost by %d%%", density_pct,
                         vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
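
/* Worked example of the penalty above: if the vectorized statements in the
   body cost 90 and the remaining statements cost 10, the density is
   90 * 100 / (90 + 10) = 90% and the body size is 100, so both thresholds
   are exceeded and the body cost is raised to 90 * (100 + 10) / 100 = 99.  */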
/* Implement targetm.vectorize.init_cost.  */

/* For each vectorized loop, this var holds TRUE iff a non-memory vector
   instruction is needed by the vectorization.  */
static bool rs6000_vect_nonmem;

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body] = 0;
  data->cost[vect_epilogue] = 0;
  rs6000_vect_nonmem = false;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
                      struct _stmt_vec_info *stmt_info, int misalign,
                      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
                                                         misalign);
      /* Statements in an inner loop relative to the loop being
         vectorized are weighted more heavily.  The value here is
         arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
        count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;

      /* Check whether we're doing something other than just a copy loop.
         Not all such loops may be profitably vectorized; see
         rs6000_finish_cost.  */
      if ((kind == vec_to_scalar || kind == vec_perm
           || kind == vec_promote_demote || kind == vec_construct
           || kind == scalar_to_vec)
          || (where == vect_body && kind == vector_stmt))
        rs6000_vect_nonmem = true;
    }

  return retval;
}
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  /* Don't vectorize minimum-vectorization-factor, simple copy loops
     that require versioning for any reason.  The vectorization is at
     best a wash inside the loop, and the versioning checks make
     profitability highly unlikely and potentially quite harmful.  */
  if (cost_data->loop_info)
    {
      loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
      if (!rs6000_vect_nonmem
          && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
          && LOOP_REQUIRES_VERSIONING (vec_info))
        cost_data->cost[vect_body] += 10000;
    }

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}
/* Implement targetm.vectorize.destroy_cost_data.  */
static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
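
/* The vectorizer drives these four hooks as a simple lifecycle: init_cost
   allocates the per-loop cost record, add_stmt_cost is called once per
   candidate statement and accumulates into cost[where], finish_cost applies
   the density and copy-loop penalties and reports the three totals, and
   destroy_cost_data releases the record.  */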
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
                                   tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  if (el_mode == DFmode && n == 2)
    {
      bdecl = mathfn_built_in (double_type_node, fn);
      suffix = "d2";                            /* pow -> powd2 */
    }
  else if (el_mode == SFmode && n == 4)
    {
      bdecl = mathfn_built_in (float_type_node, fn);
      suffix = "4";                             /* powf -> powf4 */
    }
  else
    return NULL_TREE;

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));

  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
                           FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
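
/* For example, a vectorized call to __builtin_pow with V2DFmode operands is
   renamed here by stripping the "__builtin_" prefix and appending the "d2"
   suffix, yielding a call to the MASS routine powd2; the SFmode/V4SF case
   appends "4" instead (powf -> powf4).  */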
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
                                    tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
             combined_fn_name (combined_fn (fn)),
             GET_MODE_NAME (TYPE_MODE (type_out)),
             GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];

  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];

  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];

  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];

  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
  if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];

  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && flag_unsafe_math_optimizations
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && flag_unsafe_math_optimizations
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];

  if (VECTOR_UNIT_VSX_P (V2DFmode)
      && !flag_trapping_math
      && out_mode == DFmode && out_n == 2
      && in_mode == DFmode && in_n == 2)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
  if (VECTOR_UNIT_VSX_P (V4SFmode)
      && !flag_trapping_math
      && out_mode == SFmode && out_n == 4
      && in_mode == SFmode && in_n == 4)
    return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
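
/* For instance, a copysign operation on a V2DF vector (out_mode DFmode,
   out_n 2) maps to VSX_BUILTIN_CPSGNDP above, while the same operation on
   V4SF falls back to the AltiVec copysign builtin when only AltiVec vector
   units are available.  */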
5958 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5961 rs6000_builtin_md_vectorized_function (tree fndecl
, tree type_out
,
5964 machine_mode in_mode
, out_mode
;
5967 if (TARGET_DEBUG_BUILTIN
)
5968 fprintf (stderr
, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5969 IDENTIFIER_POINTER (DECL_NAME (fndecl
)),
5970 GET_MODE_NAME (TYPE_MODE (type_out
)),
5971 GET_MODE_NAME (TYPE_MODE (type_in
)));
5973 if (TREE_CODE (type_out
) != VECTOR_TYPE
5974 || TREE_CODE (type_in
) != VECTOR_TYPE
)
5977 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5978 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
5979 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5980 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5982 enum rs6000_builtins fn
5983 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
5986 case RS6000_BUILTIN_RSQRTF
:
5987 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
5988 && out_mode
== SFmode
&& out_n
== 4
5989 && in_mode
== SFmode
&& in_n
== 4)
5990 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRSQRTFP
];
5992 case RS6000_BUILTIN_RSQRT
:
5993 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5994 && out_mode
== DFmode
&& out_n
== 2
5995 && in_mode
== DFmode
&& in_n
== 2)
5996 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
5998 case RS6000_BUILTIN_RECIPF
:
5999 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
6000 && out_mode
== SFmode
&& out_n
== 4
6001 && in_mode
== SFmode
&& in_n
== 4)
6002 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRECIPFP
];
6004 case RS6000_BUILTIN_RECIP
:
6005 if (VECTOR_UNIT_VSX_P (V2DFmode
)
6006 && out_mode
== DFmode
&& out_n
== 2
6007 && in_mode
== DFmode
&& in_n
== 2)
6008 return rs6000_builtin_decls
[VSX_BUILTIN_RECIP_V2DF
];
6016 /* Default CPU string for rs6000*_file_start functions. */
6017 static const char *rs6000_default_cpu
;
6019 /* Do anything needed at the start of the asm file. */
6022 rs6000_file_start (void)
6025 const char *start
= buffer
;
6026 FILE *file
= asm_out_file
;
6028 rs6000_default_cpu
= TARGET_CPU_DEFAULT
;
6030 default_file_start ();
6032 if (flag_verbose_asm
)
6034 sprintf (buffer
, "\n%s rs6000/powerpc options:", ASM_COMMENT_START
);
6036 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
6038 fprintf (file
, "%s --with-cpu=%s", start
, rs6000_default_cpu
);
6042 if (global_options_set
.x_rs6000_cpu_index
)
6044 fprintf (file
, "%s -mcpu=%s", start
,
6045 processor_target_table
[rs6000_cpu_index
].name
);
6049 if (global_options_set
.x_rs6000_tune_index
)
6051 fprintf (file
, "%s -mtune=%s", start
,
6052 processor_target_table
[rs6000_tune_index
].name
);
6056 if (PPC405_ERRATUM77
)
6058 fprintf (file
, "%s PPC405CR_ERRATUM77", start
);
6062 #ifdef USING_ELFOS_H
6063 switch (rs6000_sdata
)
6065 case SDATA_NONE
: fprintf (file
, "%s -msdata=none", start
); start
= ""; break;
6066 case SDATA_DATA
: fprintf (file
, "%s -msdata=data", start
); start
= ""; break;
6067 case SDATA_SYSV
: fprintf (file
, "%s -msdata=sysv", start
); start
= ""; break;
6068 case SDATA_EABI
: fprintf (file
, "%s -msdata=eabi", start
); start
= ""; break;
6071 if (rs6000_sdata
&& g_switch_value
)
6073 fprintf (file
, "%s -G %d", start
,
6083 #ifdef USING_ELFOS_H
6084 if (!(rs6000_default_cpu
&& rs6000_default_cpu
[0])
6085 && !global_options_set
.x_rs6000_cpu_index
)
6087 fputs ("\t.machine ", asm_out_file
);
6088 if ((rs6000_isa_flags
& OPTION_MASK_MODULO
) != 0)
6089 fputs ("power9\n", asm_out_file
);
6090 else if ((rs6000_isa_flags
& OPTION_MASK_DIRECT_MOVE
) != 0)
6091 fputs ("power8\n", asm_out_file
);
6092 else if ((rs6000_isa_flags
& OPTION_MASK_POPCNTD
) != 0)
6093 fputs ("power7\n", asm_out_file
);
6094 else if ((rs6000_isa_flags
& OPTION_MASK_CMPB
) != 0)
6095 fputs ("power6\n", asm_out_file
);
6096 else if ((rs6000_isa_flags
& OPTION_MASK_POPCNTB
) != 0)
6097 fputs ("power5\n", asm_out_file
);
6098 else if ((rs6000_isa_flags
& OPTION_MASK_MFCRF
) != 0)
6099 fputs ("power4\n", asm_out_file
);
6100 else if ((rs6000_isa_flags
& OPTION_MASK_POWERPC64
) != 0)
6101 fputs ("ppc64\n", asm_out_file
);
6103 fputs ("ppc\n", asm_out_file
);
6107 if (DEFAULT_ABI
== ABI_ELFv2
)
6108 fprintf (file
, "\t.abiversion 2\n");
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
          && info->first_fp_reg_save == 64
          && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
          && ! info->lr_save_p
          && ! info->cr_save_p
          && info->vrsave_size == 0
          && ! info->push_p)
        return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
           && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
        return 2;

      high >>= 1;

      if (low == 0)
        return num_insns_constant_wide (high) + 1;
      else if (high == 0)
        return num_insns_constant_wide (low) + 1;
      else
        return (num_insns_constant_wide (high)
                + num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
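
/* Example: 0x7000 fits the addi range and needs 1 insn (li); 0x12340000 has
   a zero low halfword and needs 1 insn (lis); 0x12345678 needs 2 insns
   (lis + ori); a full 64-bit constant such as 0x123456789abcdef0 builds each
   32-bit half and combines them with a shift, per the recursion above.  */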
6173 num_insns_constant (rtx op
, machine_mode mode
)
6175 HOST_WIDE_INT low
, high
;
6177 switch (GET_CODE (op
))
6180 if ((INTVAL (op
) >> 31) != 0 && (INTVAL (op
) >> 31) != -1
6181 && rs6000_is_valid_and_mask (op
, mode
))
6184 return num_insns_constant_wide (INTVAL (op
));
6186 case CONST_WIDE_INT
:
6189 int ins
= CONST_WIDE_INT_NUNITS (op
) - 1;
6190 for (i
= 0; i
< CONST_WIDE_INT_NUNITS (op
); i
++)
6191 ins
+= num_insns_constant_wide (CONST_WIDE_INT_ELT (op
, i
));
6196 if (mode
== SFmode
|| mode
== SDmode
)
6200 if (DECIMAL_FLOAT_MODE_P (mode
))
6201 REAL_VALUE_TO_TARGET_DECIMAL32
6202 (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6204 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6205 return num_insns_constant_wide ((HOST_WIDE_INT
) l
);
6209 if (DECIMAL_FLOAT_MODE_P (mode
))
6210 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6212 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6213 high
= l
[WORDS_BIG_ENDIAN
== 0];
6214 low
= l
[WORDS_BIG_ENDIAN
!= 0];
6217 return (num_insns_constant_wide (low
)
6218 + num_insns_constant_wide (high
));
6221 if ((high
== 0 && low
>= 0)
6222 || (high
== -1 && low
< 0))
6223 return num_insns_constant_wide (low
);
6225 else if (rs6000_is_valid_and_mask (op
, mode
))
6229 return num_insns_constant_wide (high
) + 1;
6232 return (num_insns_constant_wide (high
)
6233 + num_insns_constant_wide (low
) + 1);
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
              && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
6262 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6263 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6264 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6265 all items are set to the same value and contain COPIES replicas of the
6266 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6267 operand and the others are set to the value of the operand's msb. */
6270 vspltis_constant (rtx op
, unsigned step
, unsigned copies
)
6272 machine_mode mode
= GET_MODE (op
);
6273 machine_mode inner
= GET_MODE_INNER (mode
);
6281 HOST_WIDE_INT splat_val
;
6282 HOST_WIDE_INT msb_val
;
6284 if (mode
== V2DImode
|| mode
== V2DFmode
|| mode
== V1TImode
)
6287 nunits
= GET_MODE_NUNITS (mode
);
6288 bitsize
= GET_MODE_BITSIZE (inner
);
6289 mask
= GET_MODE_MASK (inner
);
6291 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
6293 msb_val
= val
>= 0 ? 0 : -1;
6295 /* Construct the value to be splatted, if possible. If not, return 0. */
6296 for (i
= 2; i
<= copies
; i
*= 2)
6298 HOST_WIDE_INT small_val
;
6300 small_val
= splat_val
>> bitsize
;
6302 if (splat_val
!= ((HOST_WIDE_INT
)
6303 ((unsigned HOST_WIDE_INT
) small_val
<< bitsize
)
6304 | (small_val
& mask
)))
6306 splat_val
= small_val
;
6309 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6310 if (EASY_VECTOR_15 (splat_val
))
6313 /* Also check if we can splat, and then add the result to itself. Do so if
6314 the value is positive, of if the splat instruction is using OP's mode;
6315 for splat_val < 0, the splat and the add should use the same mode. */
6316 else if (EASY_VECTOR_15_ADD_SELF (splat_val
)
6317 && (splat_val
>= 0 || (step
== 1 && copies
== 1)))
6320 /* Also check if are loading up the most significant bit which can be done by
6321 loading up -1 and shifting the value left by -1. */
6322 else if (EASY_VECTOR_MSB (splat_val
, inner
))
6328 /* Check if VAL is present in every STEP-th element, and the
6329 other elements are filled with its most significant bit. */
6330 for (i
= 1; i
< nunits
; ++i
)
6332 HOST_WIDE_INT desired_val
;
6333 unsigned elt
= BYTES_BIG_ENDIAN
? nunits
- 1 - i
: i
;
6334 if ((i
& (step
- 1)) == 0)
6337 desired_val
= msb_val
;
6339 if (desired_val
!= const_vector_elt_as_int (op
, elt
))
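
/* Splat example: the V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 } is matched
   with STEP = 1 and COPIES = 1 and becomes a single vspltish 5.  The V4SImode
   constant with every word equal to 0x00050005 is the same splat viewed with
   COPIES = 2, since each word then holds two replicas of the halfword
   operand.  */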
/* Like vspltis_constant, but allow the value to be shifted left with a
   VSLDOI instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */
6356 vspltis_shifted (rtx op
)
6358 machine_mode mode
= GET_MODE (op
);
6359 machine_mode inner
= GET_MODE_INNER (mode
);
6367 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
)
6370 /* We need to create pseudo registers to do the shift, so don't recognize
6371 shift vector constants after reload. */
6372 if (!can_create_pseudo_p ())
6375 nunits
= GET_MODE_NUNITS (mode
);
6376 mask
= GET_MODE_MASK (inner
);
6378 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? 0 : nunits
- 1);
6380 /* Check if the value can really be the operand of a vspltis[bhw]. */
6381 if (EASY_VECTOR_15 (val
))
6384 /* Also check if we are loading up the most significant bit which can be done
6385 by loading up -1 and shifting the value left by -1. */
6386 else if (EASY_VECTOR_MSB (val
, inner
))
6392 /* Check if VAL is present in every STEP-th element until we find elements
6393 that are 0 or all 1 bits. */
6394 for (i
= 1; i
< nunits
; ++i
)
6396 unsigned elt
= BYTES_BIG_ENDIAN
? i
: nunits
- 1 - i
;
6397 HOST_WIDE_INT elt_val
= const_vector_elt_as_int (op
, elt
);
6399 /* If the value isn't the splat value, check for the remaining elements
6405 for (j
= i
+1; j
< nunits
; ++j
)
6407 unsigned elt2
= BYTES_BIG_ENDIAN
? j
: nunits
- 1 - j
;
6408 if (const_vector_elt_as_int (op
, elt2
) != 0)
6412 return (nunits
- i
) * GET_MODE_SIZE (inner
);
6415 else if ((elt_val
& mask
) == mask
)
6417 for (j
= i
+1; j
< nunits
; ++j
)
6419 unsigned elt2
= BYTES_BIG_ENDIAN
? j
: nunits
- 1 - j
;
6420 if ((const_vector_elt_as_int (op
, elt2
) & mask
) != mask
)
6424 return -((nunits
- i
) * GET_MODE_SIZE (inner
));
  /* If all elements are equal, we don't need to do VSLDOI.  */
6437 /* Return true if OP is of the given MODE and can be synthesized
6438 with a vspltisb, vspltish or vspltisw. */
6441 easy_altivec_constant (rtx op
, machine_mode mode
)
6443 unsigned step
, copies
;
6445 if (mode
== VOIDmode
)
6446 mode
= GET_MODE (op
);
6447 else if (mode
!= GET_MODE (op
))
6450 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6452 if (mode
== V2DFmode
)
6453 return zero_constant (op
, mode
);
6455 else if (mode
== V2DImode
)
6457 if (GET_CODE (CONST_VECTOR_ELT (op
, 0)) != CONST_INT
6458 || GET_CODE (CONST_VECTOR_ELT (op
, 1)) != CONST_INT
)
6461 if (zero_constant (op
, mode
))
6464 if (INTVAL (CONST_VECTOR_ELT (op
, 0)) == -1
6465 && INTVAL (CONST_VECTOR_ELT (op
, 1)) == -1)
6471 /* V1TImode is a special container for TImode. Ignore for now. */
6472 else if (mode
== V1TImode
)
6475 /* Start with a vspltisw. */
6476 step
= GET_MODE_NUNITS (mode
) / 4;
6479 if (vspltis_constant (op
, step
, copies
))
6482 /* Then try with a vspltish. */
6488 if (vspltis_constant (op
, step
, copies
))
6491 /* And finally a vspltisb. */
6497 if (vspltis_constant (op
, step
, copies
))
6500 if (vspltis_shifted (op
) != 0)
6506 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6507 result is OP. Abort if it is not possible. */
6510 gen_easy_altivec_constant (rtx op
)
6512 machine_mode mode
= GET_MODE (op
);
6513 int nunits
= GET_MODE_NUNITS (mode
);
6514 rtx val
= CONST_VECTOR_ELT (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
6515 unsigned step
= nunits
/ 4;
6516 unsigned copies
= 1;
6518 /* Start with a vspltisw. */
6519 if (vspltis_constant (op
, step
, copies
))
6520 return gen_rtx_VEC_DUPLICATE (V4SImode
, gen_lowpart (SImode
, val
));
6522 /* Then try with a vspltish. */
6528 if (vspltis_constant (op
, step
, copies
))
6529 return gen_rtx_VEC_DUPLICATE (V8HImode
, gen_lowpart (HImode
, val
));
6531 /* And finally a vspltisb. */
6537 if (vspltis_constant (op
, step
, copies
))
6538 return gen_rtx_VEC_DUPLICATE (V16QImode
, gen_lowpart (QImode
, val
));
6543 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6544 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6546 Return the number of instructions needed (1 or 2) into the address pointed
6549 Return the constant that is being split via CONSTANT_PTR. */
6552 xxspltib_constant_p (rtx op
,
6557 size_t nunits
= GET_MODE_NUNITS (mode
);
6559 HOST_WIDE_INT value
;
6562 /* Set the returned values to out of bound values. */
6563 *num_insns_ptr
= -1;
6564 *constant_ptr
= 256;
6566 if (!TARGET_P9_VECTOR
)
6569 if (mode
== VOIDmode
)
6570 mode
= GET_MODE (op
);
6572 else if (mode
!= GET_MODE (op
) && GET_MODE (op
) != VOIDmode
)
6575 /* Handle (vec_duplicate <constant>). */
6576 if (GET_CODE (op
) == VEC_DUPLICATE
)
6578 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
6579 && mode
!= V2DImode
)
6582 element
= XEXP (op
, 0);
6583 if (!CONST_INT_P (element
))
6586 value
= INTVAL (element
);
6587 if (!IN_RANGE (value
, -128, 127))
6591 /* Handle (const_vector [...]). */
6592 else if (GET_CODE (op
) == CONST_VECTOR
)
6594 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
6595 && mode
!= V2DImode
)
6598 element
= CONST_VECTOR_ELT (op
, 0);
6599 if (!CONST_INT_P (element
))
6602 value
= INTVAL (element
);
6603 if (!IN_RANGE (value
, -128, 127))
6606 for (i
= 1; i
< nunits
; i
++)
6608 element
= CONST_VECTOR_ELT (op
, i
);
6609 if (!CONST_INT_P (element
))
6612 if (value
!= INTVAL (element
))
6617 /* Handle integer constants being loaded into the upper part of the VSX
6618 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6619 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLITIB. */
6620 else if (CONST_INT_P (op
))
6622 if (!SCALAR_INT_MODE_P (mode
))
6625 value
= INTVAL (op
);
6626 if (!IN_RANGE (value
, -128, 127))
6629 if (!IN_RANGE (value
, -1, 0))
6631 if (!(reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
] & RELOAD_REG_VALID
))
6634 if (EASY_VECTOR_15 (value
))
6642 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6643 sign extend. Special case 0/-1 to allow getting any VSX register instead
6644 of an Altivec register. */
6645 if ((mode
== V4SImode
|| mode
== V8HImode
) && !IN_RANGE (value
, -1, 0)
6646 && EASY_VECTOR_15 (value
))
6649 /* Return # of instructions and the constant byte for XXSPLTIB. */
6650 if (mode
== V16QImode
)
6653 else if (IN_RANGE (value
, -1, 0))
6659 *constant_ptr
= (int) value
;
6664 output_vec_const_move (rtx
*operands
)
6672 mode
= GET_MODE (dest
);
6676 bool dest_vmx_p
= ALTIVEC_REGNO_P (REGNO (dest
));
6677 int xxspltib_value
= 256;
6680 if (zero_constant (vec
, mode
))
6682 if (TARGET_P9_VECTOR
)
6683 return "xxspltib %x0,0";
6685 else if (dest_vmx_p
)
6686 return "vspltisw %0,0";
6689 return "xxlxor %x0,%x0,%x0";
6692 if (all_ones_constant (vec
, mode
))
6694 if (TARGET_P9_VECTOR
)
6695 return "xxspltib %x0,255";
6697 else if (dest_vmx_p
)
6698 return "vspltisw %0,-1";
6700 else if (TARGET_P8_VECTOR
)
6701 return "xxlorc %x0,%x0,%x0";
6707 if (TARGET_P9_VECTOR
6708 && xxspltib_constant_p (vec
, mode
, &num_insns
, &xxspltib_value
))
6712 operands
[2] = GEN_INT (xxspltib_value
& 0xff);
6713 return "xxspltib %x0,%2";
6724 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest
)));
6725 if (zero_constant (vec
, mode
))
6726 return "vspltisw %0,0";
6728 if (all_ones_constant (vec
, mode
))
6729 return "vspltisw %0,-1";
6731 /* Do we need to construct a value using VSLDOI? */
6732 shift
= vspltis_shifted (vec
);
6736 splat_vec
= gen_easy_altivec_constant (vec
);
6737 gcc_assert (GET_CODE (splat_vec
) == VEC_DUPLICATE
);
6738 operands
[1] = XEXP (splat_vec
, 0);
6739 if (!EASY_VECTOR_15 (INTVAL (operands
[1])))
6742 switch (GET_MODE (splat_vec
))
6745 return "vspltisw %0,%1";
6748 return "vspltish %0,%1";
6751 return "vspltisb %0,%1";
6761 /* Initialize TARGET of vector PAIRED to VALS. */
6764 paired_expand_vector_init (rtx target
, rtx vals
)
6766 machine_mode mode
= GET_MODE (target
);
6767 int n_elts
= GET_MODE_NUNITS (mode
);
6769 rtx x
, new_rtx
, tmp
, constant_op
, op1
, op2
;
6772 for (i
= 0; i
< n_elts
; ++i
)
6774 x
= XVECEXP (vals
, 0, i
);
6775 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6780 /* Load from constant pool. */
6781 emit_move_insn (target
, gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0)));
6787 /* The vector is initialized only with non-constants. */
6788 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, XVECEXP (vals
, 0, 0),
6789 XVECEXP (vals
, 0, 1));
6791 emit_move_insn (target
, new_rtx
);
6795 /* One field is non-constant and the other one is a constant. Load the
6796 constant from the constant pool and use ps_merge instruction to
6797 construct the whole vector. */
6798 op1
= XVECEXP (vals
, 0, 0);
6799 op2
= XVECEXP (vals
, 0, 1);
6801 constant_op
= (CONSTANT_P (op1
)) ? op1
: op2
;
6803 tmp
= gen_reg_rtx (GET_MODE (constant_op
));
6804 emit_move_insn (tmp
, constant_op
);
6806 if (CONSTANT_P (op1
))
6807 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, tmp
, op2
);
6809 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, op1
, tmp
);
6811 emit_move_insn (target
, new_rtx
);
6815 paired_expand_vector_move (rtx operands
[])
6817 rtx op0
= operands
[0], op1
= operands
[1];
6819 emit_move_insn (op0
, op1
);
6822 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6823 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6824 operands for the relation operation COND. This is a recursive
6828 paired_emit_vector_compare (enum rtx_code rcode
,
6829 rtx dest
, rtx op0
, rtx op1
,
6830 rtx cc_op0
, rtx cc_op1
)
6832 rtx tmp
= gen_reg_rtx (V2SFmode
);
6835 gcc_assert (TARGET_PAIRED_FLOAT
);
6836 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
6842 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6846 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
6847 emit_insn (gen_selv2sf4 (dest
, tmp
, op0
, op1
, CONST0_RTX (SFmode
)));
6851 paired_emit_vector_compare (GE
, dest
, op0
, op1
, cc_op1
, cc_op0
);
6854 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6857 tmp1
= gen_reg_rtx (V2SFmode
);
6858 max
= gen_reg_rtx (V2SFmode
);
6859 min
= gen_reg_rtx (V2SFmode
);
6860 gen_reg_rtx (V2SFmode
);
6862 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
6863 emit_insn (gen_selv2sf4
6864 (max
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
6865 emit_insn (gen_subv2sf3 (tmp
, cc_op1
, cc_op0
));
6866 emit_insn (gen_selv2sf4
6867 (min
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
6868 emit_insn (gen_subv2sf3 (tmp1
, min
, max
));
6869 emit_insn (gen_selv2sf4 (dest
, tmp1
, op0
, op1
, CONST0_RTX (SFmode
)));
6872 paired_emit_vector_compare (EQ
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6875 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6878 paired_emit_vector_compare (LT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6881 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6884 paired_emit_vector_compare (GT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6893 /* Emit vector conditional expression.
6894 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6895 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6898 paired_emit_vector_cond_expr (rtx dest
, rtx op1
, rtx op2
,
6899 rtx cond
, rtx cc_op0
, rtx cc_op1
)
6901 enum rtx_code rcode
= GET_CODE (cond
);
6903 if (!TARGET_PAIRED_FLOAT
)
6906 paired_emit_vector_compare (rcode
, dest
, op1
, op2
, cc_op0
, cc_op1
);
6911 /* Initialize vector TARGET to VALS. */
6914 rs6000_expand_vector_init (rtx target
, rtx vals
)
6916 machine_mode mode
= GET_MODE (target
);
6917 machine_mode inner_mode
= GET_MODE_INNER (mode
);
6918 int n_elts
= GET_MODE_NUNITS (mode
);
6919 int n_var
= 0, one_var
= -1;
6920 bool all_same
= true, all_const_zero
= true;
6924 for (i
= 0; i
< n_elts
; ++i
)
6926 x
= XVECEXP (vals
, 0, i
);
6927 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6928 ++n_var
, one_var
= i
;
6929 else if (x
!= CONST0_RTX (inner_mode
))
6930 all_const_zero
= false;
6932 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
6938 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
6939 bool int_vector_p
= (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
6940 if ((int_vector_p
|| TARGET_VSX
) && all_const_zero
)
6942 /* Zero register. */
6943 emit_move_insn (target
, CONST0_RTX (mode
));
6946 else if (int_vector_p
&& easy_vector_constant (const_vec
, mode
))
6948 /* Splat immediate. */
6949 emit_insn (gen_rtx_SET (target
, const_vec
));
6954 /* Load from constant pool. */
6955 emit_move_insn (target
, const_vec
);
6960 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6961 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
6965 size_t num_elements
= all_same
? 1 : 2;
6966 for (i
= 0; i
< num_elements
; i
++)
6968 op
[i
] = XVECEXP (vals
, 0, i
);
6969 /* Just in case there is a SUBREG with a smaller mode, do a
6971 if (GET_MODE (op
[i
]) != inner_mode
)
6973 rtx tmp
= gen_reg_rtx (inner_mode
);
6974 convert_move (tmp
, op
[i
], 0);
6977 /* Allow load with splat double word. */
6978 else if (MEM_P (op
[i
]))
6981 op
[i
] = force_reg (inner_mode
, op
[i
]);
6983 else if (!REG_P (op
[i
]))
6984 op
[i
] = force_reg (inner_mode
, op
[i
]);
6989 if (mode
== V2DFmode
)
6990 emit_insn (gen_vsx_splat_v2df (target
, op
[0]));
6992 emit_insn (gen_vsx_splat_v2di (target
, op
[0]));
6996 if (mode
== V2DFmode
)
6997 emit_insn (gen_vsx_concat_v2df (target
, op
[0], op
[1]));
6999 emit_insn (gen_vsx_concat_v2di (target
, op
[0], op
[1]));
7004 /* Special case initializing vector int if we are on 64-bit systems with
7005 direct move or we have the ISA 3.0 instructions. */
7006 if (mode
== V4SImode
&& VECTOR_MEM_VSX_P (V4SImode
)
7007 && TARGET_DIRECT_MOVE_64BIT
)
7011 rtx element0
= XVECEXP (vals
, 0, 0);
7012 if (MEM_P (element0
))
7013 element0
= rs6000_address_for_fpconvert (element0
);
7015 element0
= force_reg (SImode
, element0
);
7017 if (TARGET_P9_VECTOR
)
7018 emit_insn (gen_vsx_splat_v4si (target
, element0
));
7021 rtx tmp
= gen_reg_rtx (DImode
);
7022 emit_insn (gen_zero_extendsidi2 (tmp
, element0
));
7023 emit_insn (gen_vsx_splat_v4si_di (target
, tmp
));
7032 for (i
= 0; i
< 4; i
++)
7034 elements
[i
] = XVECEXP (vals
, 0, i
);
7035 if (!CONST_INT_P (elements
[i
]) && !REG_P (elements
[i
]))
7036 elements
[i
] = copy_to_mode_reg (SImode
, elements
[i
]);
7039 emit_insn (gen_vsx_init_v4si (target
, elements
[0], elements
[1],
7040 elements
[2], elements
[3]));
7045 /* With single precision floating point on VSX, know that internally single
7046 precision is actually represented as a double, and either make 2 V2DF
7047 vectors, and convert these vectors to single precision, or do one
7048 conversion, and splat the result to the other elements. */
7049 if (mode
== V4SFmode
&& VECTOR_MEM_VSX_P (V4SFmode
))
7053 rtx element0
= XVECEXP (vals
, 0, 0);
7055 if (TARGET_P9_VECTOR
)
7057 if (MEM_P (element0
))
7058 element0
= rs6000_address_for_fpconvert (element0
);
7060 emit_insn (gen_vsx_splat_v4sf (target
, element0
));
7065 rtx freg
= gen_reg_rtx (V4SFmode
);
7066 rtx sreg
= force_reg (SFmode
, element0
);
7067 rtx cvt
= (TARGET_XSCVDPSPN
7068 ? gen_vsx_xscvdpspn_scalar (freg
, sreg
)
7069 : gen_vsx_xscvdpsp_scalar (freg
, sreg
));
7072 emit_insn (gen_vsx_xxspltw_v4sf_direct (target
, freg
,
7078 rtx dbl_even
= gen_reg_rtx (V2DFmode
);
7079 rtx dbl_odd
= gen_reg_rtx (V2DFmode
);
7080 rtx flt_even
= gen_reg_rtx (V4SFmode
);
7081 rtx flt_odd
= gen_reg_rtx (V4SFmode
);
7082 rtx op0
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
7083 rtx op1
= force_reg (SFmode
, XVECEXP (vals
, 0, 1));
7084 rtx op2
= force_reg (SFmode
, XVECEXP (vals
, 0, 2));
7085 rtx op3
= force_reg (SFmode
, XVECEXP (vals
, 0, 3));
7087 /* Use VMRGEW if we can instead of doing a permute. */
7088 if (TARGET_P8_VECTOR
)
7090 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op2
));
7091 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op1
, op3
));
7092 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
7093 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
7094 if (BYTES_BIG_ENDIAN
)
7095 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_even
, flt_odd
));
7097 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_odd
, flt_even
));
7101 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op1
));
7102 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op2
, op3
));
7103 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
7104 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
7105 rs6000_expand_extract_even (target
, flt_even
, flt_odd
);
7111 /* Special case initializing vector short/char that are splats if we are on
7112 64-bit systems with direct move. */
7113 if (all_same
&& TARGET_DIRECT_MOVE_64BIT
7114 && (mode
== V16QImode
|| mode
== V8HImode
))
7116 rtx op0
= XVECEXP (vals
, 0, 0);
7117 rtx di_tmp
= gen_reg_rtx (DImode
);
7120 op0
= force_reg (GET_MODE_INNER (mode
), op0
);
7122 if (mode
== V16QImode
)
7124 emit_insn (gen_zero_extendqidi2 (di_tmp
, op0
));
7125 emit_insn (gen_vsx_vspltb_di (target
, di_tmp
));
7129 if (mode
== V8HImode
)
7131 emit_insn (gen_zero_extendhidi2 (di_tmp
, op0
));
7132 emit_insn (gen_vsx_vsplth_di (target
, di_tmp
));
7137 /* Store value to stack temp. Load vector element. Splat. However, splat
7138 of 64-bit items is not supported on Altivec. */
7139 if (all_same
&& GET_MODE_SIZE (inner_mode
) <= 4)
7141 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
7142 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
7143 XVECEXP (vals
, 0, 0));
7144 x
= gen_rtx_UNSPEC (VOIDmode
,
7145 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
7146 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7148 gen_rtx_SET (target
, mem
),
7150 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
7151 gen_rtx_PARALLEL (VOIDmode
,
7152 gen_rtvec (1, const0_rtx
)));
7153 emit_insn (gen_rtx_SET (target
, gen_rtx_VEC_DUPLICATE (mode
, x
)));
7157 /* One field is non-constant. Load constant then overwrite
7161 rtx copy
= copy_rtx (vals
);
7163 /* Load constant part of vector, substitute neighboring value for
7165 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
7166 rs6000_expand_vector_init (target
, copy
);
7168 /* Insert variable. */
7169 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
7173 /* Construct the vector in memory one field at a time
7174 and load the whole vector. */
7175 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
7176 for (i
= 0; i
< n_elts
; i
++)
7177 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
7178 i
* GET_MODE_SIZE (inner_mode
)),
7179 XVECEXP (vals
, 0, i
));
7180 emit_move_insn (target
, mem
);
7183 /* Set field ELT of TARGET to VAL. */
7186 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
7188 machine_mode mode
= GET_MODE (target
);
7189 machine_mode inner_mode
= GET_MODE_INNER (mode
);
7190 rtx reg
= gen_reg_rtx (mode
);
7192 int width
= GET_MODE_SIZE (inner_mode
);
7195 val
= force_reg (GET_MODE (val
), val
);
7197 if (VECTOR_MEM_VSX_P (mode
))
7199 rtx insn
= NULL_RTX
;
7200 rtx elt_rtx
= GEN_INT (elt
);
7202 if (mode
== V2DFmode
)
7203 insn
= gen_vsx_set_v2df (target
, target
, val
, elt_rtx
);
7205 else if (mode
== V2DImode
)
7206 insn
= gen_vsx_set_v2di (target
, target
, val
, elt_rtx
);
7208 else if (TARGET_P9_VECTOR
&& TARGET_POWERPC64
)
7210 if (mode
== V4SImode
)
7211 insn
= gen_vsx_set_v4si_p9 (target
, target
, val
, elt_rtx
);
7212 else if (mode
== V8HImode
)
7213 insn
= gen_vsx_set_v8hi_p9 (target
, target
, val
, elt_rtx
);
7214 else if (mode
== V16QImode
)
7215 insn
= gen_vsx_set_v16qi_p9 (target
, target
, val
, elt_rtx
);
7216 else if (mode
== V4SFmode
)
7217 insn
= gen_vsx_set_v4sf_p9 (target
, target
, val
, elt_rtx
);
7227 /* Simplify setting single element vectors like V1TImode. */
7228 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (inner_mode
) && elt
== 0)
7230 emit_move_insn (target
, gen_lowpart (mode
, val
));
7234 /* Load single variable value. */
7235 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
7236 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
7237 x
= gen_rtx_UNSPEC (VOIDmode
,
7238 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
7239 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7241 gen_rtx_SET (reg
, mem
),
7244 /* Linear sequence. */
7245 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
7246 for (i
= 0; i
< 16; ++i
)
7247 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
7249 /* Set permute mask to insert element into target. */
7250 for (i
= 0; i
< width
; ++i
)
7251 XVECEXP (mask
, 0, elt
*width
+ i
)
7252 = GEN_INT (i
+ 0x10);
7253 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
7255 if (BYTES_BIG_ENDIAN
)
7256 x
= gen_rtx_UNSPEC (mode
,
7257 gen_rtvec (3, target
, reg
,
7258 force_reg (V16QImode
, x
)),
7262 if (TARGET_P9_VECTOR
)
7263 x
= gen_rtx_UNSPEC (mode
,
7264 gen_rtvec (3, target
, reg
,
7265 force_reg (V16QImode
, x
)),
7269 /* Invert selector. We prefer to generate VNAND on P8 so
7270 that future fusion opportunities can kick in, but must
7271 generate VNOR elsewhere. */
7272 rtx notx
= gen_rtx_NOT (V16QImode
, force_reg (V16QImode
, x
));
7273 rtx iorx
= (TARGET_P8_VECTOR
7274 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
7275 : gen_rtx_AND (V16QImode
, notx
, notx
));
7276 rtx tmp
= gen_reg_rtx (V16QImode
);
7277 emit_insn (gen_rtx_SET (tmp
, iorx
));
7279 /* Permute with operands reversed and adjusted selector. */
7280 x
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, reg
, target
, tmp
),
7285 emit_insn (gen_rtx_SET (target
, x
));
7288 /* Extract field ELT from VEC into TARGET. */
7291 rs6000_expand_vector_extract (rtx target
, rtx vec
, rtx elt
)
7293 machine_mode mode
= GET_MODE (vec
);
7294 machine_mode inner_mode
= GET_MODE_INNER (mode
);
7297 if (VECTOR_MEM_VSX_P (mode
) && CONST_INT_P (elt
))
7304 gcc_assert (INTVAL (elt
) == 0 && inner_mode
== TImode
);
7305 emit_move_insn (target
, gen_lowpart (TImode
, vec
));
7308 emit_insn (gen_vsx_extract_v2df (target
, vec
, elt
));
7311 emit_insn (gen_vsx_extract_v2di (target
, vec
, elt
));
7314 emit_insn (gen_vsx_extract_v4sf (target
, vec
, elt
));
7317 if (TARGET_DIRECT_MOVE_64BIT
)
7319 emit_insn (gen_vsx_extract_v16qi (target
, vec
, elt
));
7325 if (TARGET_DIRECT_MOVE_64BIT
)
7327 emit_insn (gen_vsx_extract_v8hi (target
, vec
, elt
));
7333 if (TARGET_DIRECT_MOVE_64BIT
)
7335 emit_insn (gen_vsx_extract_v4si (target
, vec
, elt
));
7341 else if (VECTOR_MEM_VSX_P (mode
) && !CONST_INT_P (elt
)
7342 && TARGET_DIRECT_MOVE_64BIT
)
7344 if (GET_MODE (elt
) != DImode
)
7346 rtx tmp
= gen_reg_rtx (DImode
);
7347 convert_move (tmp
, elt
, 0);
7350 else if (!REG_P (elt
))
7351 elt
= force_reg (DImode
, elt
);
7356 emit_insn (gen_vsx_extract_v2df_var (target
, vec
, elt
));
7360 emit_insn (gen_vsx_extract_v2di_var (target
, vec
, elt
));
7364 emit_insn (gen_vsx_extract_v4sf_var (target
, vec
, elt
));
7368 emit_insn (gen_vsx_extract_v4si_var (target
, vec
, elt
));
7372 emit_insn (gen_vsx_extract_v8hi_var (target
, vec
, elt
));
7376 emit_insn (gen_vsx_extract_v16qi_var (target
, vec
, elt
));
7384 gcc_assert (CONST_INT_P (elt
));
7386 /* Allocate mode-sized buffer. */
7387 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
7389 emit_move_insn (mem
, vec
);
7391 /* Add offset to field within buffer matching vector element. */
7392 mem
= adjust_address_nv (mem
, inner_mode
,
7393 INTVAL (elt
) * GET_MODE_SIZE (inner_mode
));
7395 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));
/* Helper function to return the register number of an RTX.  */
int
regno_or_subregno (rtx op)
{
  if (REG_P (op))
    return REGNO (op);
  else if (SUBREG_P (op))
    return subreg_regno (op);
  else
    gcc_unreachable ();
}
7410 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7411 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7412 temporary (BASE_TMP) to fixup the address. Return the new memory address
7413 that is valid for reads or writes to a given register (SCALAR_REG). */
7416 rs6000_adjust_vec_address (rtx scalar_reg
,
7420 machine_mode scalar_mode
)
7422 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7423 rtx addr
= XEXP (mem
, 0);
7428 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7429 gcc_assert (GET_RTX_CLASS (GET_CODE (addr
)) != RTX_AUTOINC
);
7431 /* Calculate what we need to add to the address to get the element
7433 if (CONST_INT_P (element
))
7434 element_offset
= GEN_INT (INTVAL (element
) * scalar_size
);
7437 int byte_shift
= exact_log2 (scalar_size
);
7438 gcc_assert (byte_shift
>= 0);
7440 if (byte_shift
== 0)
7441 element_offset
= element
;
7445 if (TARGET_POWERPC64
)
7446 emit_insn (gen_ashldi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
7448 emit_insn (gen_ashlsi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
7450 element_offset
= base_tmp
;
7454 /* Create the new address pointing to the element within the vector. If we
7455 are adding 0, we don't have to change the address. */
7456 if (element_offset
== const0_rtx
)
7459 /* A simple indirect address can be converted into a reg + offset
7461 else if (REG_P (addr
) || SUBREG_P (addr
))
7462 new_addr
= gen_rtx_PLUS (Pmode
, addr
, element_offset
);
7464 /* Optimize D-FORM addresses with constant offset with a constant element, to
7465 include the element offset in the address directly. */
7466 else if (GET_CODE (addr
) == PLUS
)
7468 rtx op0
= XEXP (addr
, 0);
7469 rtx op1
= XEXP (addr
, 1);
7472 gcc_assert (REG_P (op0
) || SUBREG_P (op0
));
7473 if (CONST_INT_P (op1
) && CONST_INT_P (element_offset
))
7475 HOST_WIDE_INT offset
= INTVAL (op1
) + INTVAL (element_offset
);
7476 rtx offset_rtx
= GEN_INT (offset
);
7478 if (IN_RANGE (offset
, -32768, 32767)
7479 && (scalar_size
< 8 || (offset
& 0x3) == 0))
7480 new_addr
= gen_rtx_PLUS (Pmode
, op0
, offset_rtx
);
7483 emit_move_insn (base_tmp
, offset_rtx
);
7484 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7489 bool op1_reg_p
= (REG_P (op1
) || SUBREG_P (op1
));
7490 bool ele_reg_p
= (REG_P (element_offset
) || SUBREG_P (element_offset
));
7492 /* Note, ADDI requires the register being added to be a base
7493 register. If the register was R0, load it up into the temporary
7496 && (ele_reg_p
|| reg_or_subregno (op1
) != FIRST_GPR_REGNO
))
7498 insn
= gen_add3_insn (base_tmp
, op1
, element_offset
);
7499 gcc_assert (insn
!= NULL_RTX
);
7504 && reg_or_subregno (element_offset
) != FIRST_GPR_REGNO
)
7506 insn
= gen_add3_insn (base_tmp
, element_offset
, op1
);
7507 gcc_assert (insn
!= NULL_RTX
);
7513 emit_move_insn (base_tmp
, op1
);
7514 emit_insn (gen_add2_insn (base_tmp
, element_offset
));
7517 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7523 emit_move_insn (base_tmp
, addr
);
7524 new_addr
= gen_rtx_PLUS (Pmode
, base_tmp
, element_offset
);
7527 /* If we have a PLUS, we need to see whether the particular register class
7528 allows for D-FORM or X-FORM addressing. */
7529 if (GET_CODE (new_addr
) == PLUS
)
7531 rtx op1
= XEXP (new_addr
, 1);
7532 addr_mask_type addr_mask
;
7533 int scalar_regno
= regno_or_subregno (scalar_reg
);
7535 gcc_assert (scalar_regno
< FIRST_PSEUDO_REGISTER
);
7536 if (INT_REGNO_P (scalar_regno
))
7537 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_GPR
];
7539 else if (FP_REGNO_P (scalar_regno
))
7540 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_FPR
];
7542 else if (ALTIVEC_REGNO_P (scalar_regno
))
7543 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_VMX
];
7548 if (REG_P (op1
) || SUBREG_P (op1
))
7549 valid_addr_p
= (addr_mask
& RELOAD_REG_INDEXED
) != 0;
7551 valid_addr_p
= (addr_mask
& RELOAD_REG_OFFSET
) != 0;
7554 else if (REG_P (new_addr
) || SUBREG_P (new_addr
))
7555 valid_addr_p
= true;
7558 valid_addr_p
= false;
7562 emit_move_insn (base_tmp
, new_addr
);
7563 new_addr
= base_tmp
;
7566 return change_address (mem
, scalar_mode
, new_addr
);
7569 /* Split a variable vec_extract operation into the component instructions. */
7572 rs6000_split_vec_extract_var (rtx dest
, rtx src
, rtx element
, rtx tmp_gpr
,
7575 machine_mode mode
= GET_MODE (src
);
7576 machine_mode scalar_mode
= GET_MODE (dest
);
7577 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7578 int byte_shift
= exact_log2 (scalar_size
);
7580 gcc_assert (byte_shift
>= 0);
7582 /* If we are given a memory address, optimize to load just the element. We
7583 don't have to adjust the vector element number on little endian
7587 gcc_assert (REG_P (tmp_gpr
));
7588 emit_move_insn (dest
, rs6000_adjust_vec_address (dest
, src
, element
,
7589 tmp_gpr
, scalar_mode
));
7593 else if (REG_P (src
) || SUBREG_P (src
))
7595 int bit_shift
= byte_shift
+ 3;
7597 int dest_regno
= regno_or_subregno (dest
);
7598 int src_regno
= regno_or_subregno (src
);
7599 int element_regno
= regno_or_subregno (element
);
7601 gcc_assert (REG_P (tmp_gpr
));
7603 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7604 a general purpose register. */
7605 if (TARGET_P9_VECTOR
7606 && (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
7607 && INT_REGNO_P (dest_regno
)
7608 && ALTIVEC_REGNO_P (src_regno
)
7609 && INT_REGNO_P (element_regno
))
7611 rtx dest_si
= gen_rtx_REG (SImode
, dest_regno
);
7612 rtx element_si
= gen_rtx_REG (SImode
, element_regno
);
7614 if (mode
== V16QImode
)
7615 emit_insn (VECTOR_ELT_ORDER_BIG
7616 ? gen_vextublx (dest_si
, element_si
, src
)
7617 : gen_vextubrx (dest_si
, element_si
, src
));
7619 else if (mode
== V8HImode
)
7621 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7622 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const1_rtx
));
7623 emit_insn (VECTOR_ELT_ORDER_BIG
7624 ? gen_vextuhlx (dest_si
, tmp_gpr_si
, src
)
7625 : gen_vextuhrx (dest_si
, tmp_gpr_si
, src
));
7631 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7632 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const2_rtx
));
7633 emit_insn (VECTOR_ELT_ORDER_BIG
7634 ? gen_vextuwlx (dest_si
, tmp_gpr_si
, src
)
7635 : gen_vextuwrx (dest_si
, tmp_gpr_si
, src
));
7642 gcc_assert (REG_P (tmp_altivec
));
7644 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7645 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7646 will shift the element into the upper position (adding 3 to convert a
7647 byte shift into a bit shift). */
7648 if (scalar_size
== 8)
7650 if (!VECTOR_ELT_ORDER_BIG
)
7652 emit_insn (gen_xordi3 (tmp_gpr
, element
, const1_rtx
));
7658 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7660 emit_insn (gen_rtx_SET (tmp_gpr
,
7661 gen_rtx_AND (DImode
,
7662 gen_rtx_ASHIFT (DImode
,
7669 if (!VECTOR_ELT_ORDER_BIG
)
7671 rtx num_ele_m1
= GEN_INT (GET_MODE_NUNITS (mode
) - 1);
7673 emit_insn (gen_anddi3 (tmp_gpr
, element
, num_ele_m1
));
7674 emit_insn (gen_subdi3 (tmp_gpr
, num_ele_m1
, tmp_gpr
));
7680 emit_insn (gen_ashldi3 (tmp_gpr
, element2
, GEN_INT (bit_shift
)));
7683 /* Get the value into the lower byte of the Altivec register where VSLO
7685 if (TARGET_P9_VECTOR
)
7686 emit_insn (gen_vsx_splat_v2di (tmp_altivec
, tmp_gpr
));
7687 else if (can_create_pseudo_p ())
7688 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_gpr
, tmp_gpr
));
7691 rtx tmp_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7692 emit_move_insn (tmp_di
, tmp_gpr
);
7693 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_di
, tmp_di
));
7696 /* Do the VSLO to get the value into the final location. */
7700 emit_insn (gen_vsx_vslo_v2df (dest
, src
, tmp_altivec
));
7704 emit_insn (gen_vsx_vslo_v2di (dest
, src
, tmp_altivec
));
7709 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7710 rtx tmp_altivec_v4sf
= gen_rtx_REG (V4SFmode
, REGNO (tmp_altivec
));
7711 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7712 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7715 emit_insn (gen_vsx_xscvspdp_scalar2 (dest
, tmp_altivec_v4sf
));
7723 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7724 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7725 rtx tmp_gpr_di
= gen_rtx_REG (DImode
, REGNO (dest
));
7726 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7728 emit_move_insn (tmp_gpr_di
, tmp_altivec_di
);
7729 emit_insn (gen_ashrdi3 (tmp_gpr_di
, tmp_gpr_di
,
7730 GEN_INT (64 - (8 * scalar_size
))));
7744 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7745 two SImode values. */
7748 rs6000_split_v4si_init_di_reg (rtx dest
, rtx si1
, rtx si2
, rtx tmp
)
7750 const unsigned HOST_WIDE_INT mask_32bit
= HOST_WIDE_INT_C (0xffffffff);
7752 if (CONST_INT_P (si1
) && CONST_INT_P (si2
))
7754 unsigned HOST_WIDE_INT const1
= (UINTVAL (si1
) & mask_32bit
) << 32;
7755 unsigned HOST_WIDE_INT const2
= UINTVAL (si2
) & mask_32bit
;
7757 emit_move_insn (dest
, GEN_INT (const1
| const2
));
7761 /* Put si1 into upper 32-bits of dest. */
7762 if (CONST_INT_P (si1
))
7763 emit_move_insn (dest
, GEN_INT ((UINTVAL (si1
) & mask_32bit
) << 32));
7766 /* Generate RLDIC. */
7767 rtx si1_di
= gen_rtx_REG (DImode
, regno_or_subregno (si1
));
7768 rtx shift_rtx
= gen_rtx_ASHIFT (DImode
, si1_di
, GEN_INT (32));
7769 rtx mask_rtx
= GEN_INT (mask_32bit
<< 32);
7770 rtx and_rtx
= gen_rtx_AND (DImode
, shift_rtx
, mask_rtx
);
7771 gcc_assert (!reg_overlap_mentioned_p (dest
, si1
));
7772 emit_insn (gen_rtx_SET (dest
, and_rtx
));
7775 /* Put si2 into the temporary. */
7776 gcc_assert (!reg_overlap_mentioned_p (dest
, tmp
));
7777 if (CONST_INT_P (si2
))
7778 emit_move_insn (tmp
, GEN_INT (UINTVAL (si2
) & mask_32bit
));
7780 emit_insn (gen_zero_extendsidi2 (tmp
, si2
));
7782 /* Combine the two parts. */
7783 emit_insn (gen_iordi3 (dest
, dest
, tmp
));
7787 /* Split a V4SI initialization. */
7790 rs6000_split_v4si_init (rtx operands
[])
7792 rtx dest
= operands
[0];
7794 /* Destination is a GPR, build up the two DImode parts in place. */
7795 if (REG_P (dest
) || SUBREG_P (dest
))
7797 int d_regno
= regno_or_subregno (dest
);
7798 rtx scalar1
= operands
[1];
7799 rtx scalar2
= operands
[2];
7800 rtx scalar3
= operands
[3];
7801 rtx scalar4
= operands
[4];
7802 rtx tmp1
= operands
[5];
7803 rtx tmp2
= operands
[6];
7805 /* Even though we only need one temporary (plus the destination, which
7806 has an early clobber constraint, try to use two temporaries, one for
7807 each double word created. That way the 2nd insn scheduling pass can
7808 rearrange things so the two parts are done in parallel. */
7809 if (BYTES_BIG_ENDIAN
)
7811 rtx di_lo
= gen_rtx_REG (DImode
, d_regno
);
7812 rtx di_hi
= gen_rtx_REG (DImode
, d_regno
+ 1);
7813 rs6000_split_v4si_init_di_reg (di_lo
, scalar1
, scalar2
, tmp1
);
7814 rs6000_split_v4si_init_di_reg (di_hi
, scalar3
, scalar4
, tmp2
);
7818 rtx di_lo
= gen_rtx_REG (DImode
, d_regno
+ 1);
7819 rtx di_hi
= gen_rtx_REG (DImode
, d_regno
);
7820 gcc_assert (!VECTOR_ELT_ORDER_BIG
);
7821 rs6000_split_v4si_init_di_reg (di_lo
, scalar4
, scalar3
, tmp1
);
7822 rs6000_split_v4si_init_di_reg (di_hi
, scalar2
, scalar1
, tmp2
);
7831 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7832 selects whether the alignment is abi mandated, optional, or
7833 both abi and optional alignment. */
7836 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7838   if (how != align_opt)
7840       if (TREE_CODE (type) == VECTOR_TYPE)
7842           if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7847           else if (align < 128)
7852   if (how != align_abi)
7854       if (TREE_CODE (type) == ARRAY_TYPE
7855           && TYPE_MODE (TREE_TYPE (type)) == QImode)
7857           if (align < BITS_PER_WORD)
7858             align = BITS_PER_WORD;
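      /* Illustrative note (not part of the original source): when optional
         alignment is allowed (HOW != align_abi), a plain char array with the
         default byte alignment is raised here to BITS_PER_WORD, e.g. to
         64-bit alignment on a 64-bit target.  */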
7865 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7866 instructions simply ignore the low bits; VSX memory instructions
7867 are aligned to 4 or 8 bytes. */
7870 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7872   return (STRICT_ALIGNMENT
7873           || (!TARGET_EFFICIENT_UNALIGNED_VSX
7874               && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7875                   || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7876                       && (int) align < VECTOR_ALIGN (mode)))));
7879 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7882 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7884   if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7886       if (computed != 128)
7889           if (!warned && warn_psabi)
7892               inform (input_location,
7893                       "the layout of aggregates containing vectors with"
7894                       " %d-byte alignment has changed in GCC 5",
7895                       computed / BITS_PER_UNIT);
7898 /* In current GCC there is no special case. */
7905 /* AIX increases natural record alignment to doubleword if the first
7906 field is an FP double while the FP fields remain word aligned. */
7909 rs6000_special_round_type_align (tree type, unsigned int computed,
7910                                  unsigned int specified)
7912   unsigned int align = MAX (computed, specified);
7913   tree field = TYPE_FIELDS (type);
7915   /* Skip all non field decls */
7916   while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7917     field = DECL_CHAIN (field);
7919   if (field != NULL && field != type)
7921       type = TREE_TYPE (field);
7922       while (TREE_CODE (type) == ARRAY_TYPE)
7923         type = TREE_TYPE (type);
7925       if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7926         align = MAX (align, 64);
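/* Illustrative example (not part of the original source): under this rule a
   record such as  struct { double d; int i; }  receives doubleword (64-bit)
   alignment because its first field has DFmode, while the double field
   itself keeps its word alignment within the record.  */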
7932 /* Darwin increases record alignment to the natural alignment of
7936 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7937                                         unsigned int specified)
7939   unsigned int align = MAX (computed, specified);
7941   if (TYPE_PACKED (type))
7944   /* Find the first field, looking down into aggregates.  */
7946       tree field = TYPE_FIELDS (type);
7947       /* Skip all non field decls */
7948       while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7949         field = DECL_CHAIN (field);
7952       /* A packed field does not contribute any extra alignment.  */
7953       if (DECL_PACKED (field))
7955       type = TREE_TYPE (field);
7956       while (TREE_CODE (type) == ARRAY_TYPE)
7957         type = TREE_TYPE (type);
7958     } while (AGGREGATE_TYPE_P (type));
7960   if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7961     align = MAX (align, TYPE_ALIGN (type));
7966 /* Return 1 for an operand in small memory on V.4/eabi. */
7969 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7970                     machine_mode mode ATTRIBUTE_UNUSED)
7975   if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7978   if (DEFAULT_ABI != ABI_V4)
7981   if (GET_CODE (op) == SYMBOL_REF)
7984   else if (GET_CODE (op) != CONST
7985            || GET_CODE (XEXP (op, 0)) != PLUS
7986            || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7987            || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7992       rtx sum = XEXP (op, 0);
7993       HOST_WIDE_INT summand;
7995       /* We have to be careful here, because it is the referenced address
7996          that must be 32k from _SDA_BASE_, not just the symbol.  */
7997       summand = INTVAL (XEXP (sum, 1));
7998       if (summand < 0 || summand > g_switch_value)
8001       sym_ref = XEXP (sum, 0);
8004   return SYMBOL_REF_SMALL_P (sym_ref);
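  /* Illustrative example (not part of the original source): assuming -G 8 is
     in effect, an operand of the form SYM + 12 is rejected above even if SYM
     itself lives in the small data area, because the referenced address, not
     just the symbol, must stay within reach of _SDA_BASE_.  */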
8010 /* Return true if either operand is a general purpose register. */
8013 gpr_or_gpr_p (rtx op0, rtx op1)
8015   return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8016           || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8019 /* Return true if this is a move direct operation between GPR registers and
8020 floating point/VSX registers. */
8023 direct_move_p (rtx op0, rtx op1)
8027   if (!REG_P (op0) || !REG_P (op1))
8030   if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8033   regno0 = REGNO (op0);
8034   regno1 = REGNO (op1);
8035   if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8038   if (INT_REGNO_P (regno0))
8039     return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8041   else if (INT_REGNO_P (regno1))
8043       if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8046       else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8053 /* Return true if the OFFSET is valid for the quad address instructions that
8054 use d-form (register + offset) addressing. */
8057 quad_address_offset_p (HOST_WIDE_INT offset)
8059   return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
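/* Illustrative note (not part of the original source): the offset must be a
   16-byte-aligned value that fits in a signed 16-bit field, so 0, 16 and
   -32768 are accepted while 8 (misaligned) and 32768 (out of range) are
   rejected.  */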
8062 /* Return true if the ADDR is an acceptable address for a quad memory
8063 operation of mode MODE (either LQ/STQ for general purpose registers, or
8064 LXV/STXV for vector registers under ISA 3.0. GPR_P is true if this address
8065 is intended for LQ/STQ. If it is false, the address is intended for the ISA
8066 3.0 LXV/STXV instruction. */
8069 quad_address_p (rtx addr, machine_mode mode, bool strict)
8073   if (GET_MODE_SIZE (mode) != 16)
8076   if (legitimate_indirect_address_p (addr, strict))
8079   if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8082   if (GET_CODE (addr) != PLUS)
8085   op0 = XEXP (addr, 0);
8086   if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8089   op1 = XEXP (addr, 1);
8090   if (!CONST_INT_P (op1))
8093   return quad_address_offset_p (INTVAL (op1));
8096 /* Return true if this is a load or store quad operation. This function does
8097 not handle the atomic quad memory instructions. */
8100 quad_load_store_p (rtx op0, rtx op1)
8104   if (!TARGET_QUAD_MEMORY)
8107   else if (REG_P (op0) && MEM_P (op1))
8108     ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8109            && quad_memory_operand (op1, GET_MODE (op1))
8110            && !reg_overlap_mentioned_p (op0, op1));
8112   else if (MEM_P (op0) && REG_P (op1))
8113     ret = (quad_memory_operand (op0, GET_MODE (op0))
8114            && quad_int_reg_operand (op1, GET_MODE (op1)));
8119   if (TARGET_DEBUG_ADDR)
8121       fprintf (stderr, "\n========== quad_load_store, return %s\n",
8122                ret ? "true" : "false");
8123       debug_rtx (gen_rtx_SET (op0, op1));
8129 /* Given an address, return a constant offset term if one exists. */
8132 address_offset (rtx op)
8134   if (GET_CODE (op) == PRE_INC
8135       || GET_CODE (op) == PRE_DEC)
8137   else if (GET_CODE (op) == PRE_MODIFY
8138            || GET_CODE (op) == LO_SUM)
8141   if (GET_CODE (op) == CONST)
8144   if (GET_CODE (op) == PLUS)
8147   if (CONST_INT_P (op))
8153 /* Return true if the MEM operand is a memory operand suitable for use
8154 with a (full width, possibly multiple) gpr load/store. On
8155 powerpc64 this means the offset must be divisible by 4.
8156 Implements 'Y' constraint.
8158 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8159 a constraint function we know the operand has satisfied a suitable
8160 memory predicate. Also accept some odd rtl generated by reload
8161 (see rs6000_legitimize_reload_address for various forms). It is
8162 important that reload rtl be accepted by appropriate constraints
8163 but not by the operand predicate.
8165 Offsetting a lo_sum should not be allowed, except where we know by
8166 alignment that a 32k boundary is not crossed, but see the ???
8167 comment in rs6000_legitimize_reload_address. Note that by
8168 "offsetting" here we mean a further offset to access parts of the
8169 MEM. It's fine to have a lo_sum where the inner address is offset
8170 from a sym, since the same sym+offset will appear in the high part
8171 of the address calculation. */
8174 mem_operand_gpr (rtx op, machine_mode mode)
8176   unsigned HOST_WIDE_INT offset;
8178   rtx addr = XEXP (op, 0);
8180   op = address_offset (addr);
8184   offset = INTVAL (op);
8185   if (TARGET_POWERPC64 && (offset & 3) != 0)
8188   extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8192   if (GET_CODE (addr) == LO_SUM)
8193     /* For lo_sum addresses, we must allow any offset except one that
8194        causes a wrap, so test only the low 16 bits.  */
8195     offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8197   return offset + 0x8000 < 0x10000u - extra;
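  /* Illustrative worked example (not part of the original source):
     ((offset & 0xffff) ^ 0x8000) - 0x8000 sign-extends the low 16 bits of
     OFFSET.  For offset == 0x1fff4 the low halfword is 0xfff4; XORing with
     0x8000 gives 0x7ff4 and subtracting 0x8000 yields -12, the value a
     16-bit displacement field would actually encode.  */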
8200 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8201 enforce an offset divisible by 4 even for 32-bit. */
8204 mem_operand_ds_form (rtx op, machine_mode mode)
8206   unsigned HOST_WIDE_INT offset;
8208   rtx addr = XEXP (op, 0);
8210   if (!offsettable_address_p (false, mode, addr))
8213   op = address_offset (addr);
8217   offset = INTVAL (op);
8218   if ((offset & 3) != 0)
8221   extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8225   if (GET_CODE (addr) == LO_SUM)
8226     /* For lo_sum addresses, we must allow any offset except one that
8227        causes a wrap, so test only the low 16 bits.  */
8228     offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8230   return offset + 0x8000 < 0x10000u - extra;
8233 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8236 reg_offset_addressing_ok_p (machine_mode mode)
8250 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8251 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8252 a vector mode, if we want to use the VSX registers to move it around,
8253 we need to restrict ourselves to reg+reg addressing. Similarly for
8254 IEEE 128-bit floating point that is passed in a single vector
8256   if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8257     return mode_supports_vsx_dform_quad (mode);
8262   /* Paired vector modes.  Only reg+reg addressing is valid.  */
8263   if (TARGET_PAIRED_FLOAT)
8268   /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8269      addressing for the LFIWZX and STFIWX instructions.  */
8270   if (TARGET_NO_SDMODE_STACK)
8282 virtual_stack_registers_memory_p (rtx op)
8286   if (GET_CODE (op) == REG)
8287     regnum = REGNO (op);
8289   else if (GET_CODE (op) == PLUS
8290            && GET_CODE (XEXP (op, 0)) == REG
8291            && GET_CODE (XEXP (op, 1)) == CONST_INT)
8292     regnum = REGNO (XEXP (op, 0));
8297   return (regnum >= FIRST_VIRTUAL_REGISTER
8298           && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8301 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8302 is known to not straddle a 32k boundary. This function is used
8303 to determine whether -mcmodel=medium code can use TOC pointer
8304 relative addressing for OP. This means the alignment of the TOC
8305 pointer must also be taken into account, and unfortunately that is
8308 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8309 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8313 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8317   unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8319   if (GET_CODE (op) != SYMBOL_REF)
8322   /* ISA 3.0 vector d-form addressing is restricted, don't allow
8324   if (mode_supports_vsx_dform_quad (mode))
8327   dsize = GET_MODE_SIZE (mode);
8328   decl = SYMBOL_REF_DECL (op);
8334       /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8335          replacing memory addresses with an anchor plus offset.  We
8336          could find the decl by rummaging around in the block->objects
8337          VEC for the given offset but that seems like too much work.  */
8338       dalign = BITS_PER_UNIT;
8339       if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8340           && SYMBOL_REF_ANCHOR_P (op)
8341           && SYMBOL_REF_BLOCK (op) != NULL)
8343           struct object_block *block = SYMBOL_REF_BLOCK (op);
8345           dalign = block->alignment;
8346           offset += SYMBOL_REF_BLOCK_OFFSET (op);
8348       else if (CONSTANT_POOL_ADDRESS_P (op))
8350           /* It would be nice to have get_pool_align()..  */
8351           machine_mode cmode = get_pool_mode (op);
8353           dalign = GET_MODE_ALIGNMENT (cmode);
8356   else if (DECL_P (decl))
8358       dalign = DECL_ALIGN (decl);
8362       /* Allow BLKmode when the entire object is known to not
8363          cross a 32k boundary.  */
8364       if (!DECL_SIZE_UNIT (decl))
8367       if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8370       dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8374       dalign /= BITS_PER_UNIT;
8375       if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8376         dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8377       return dalign >= dsize;
8383   /* Find how many bits of the alignment we know for this access.  */
8384   dalign /= BITS_PER_UNIT;
8385   if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8386     dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8388   lsb = offset & -offset;
8392   return dalign >= dsize;
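  /* Illustrative worked example (not part of the original source):
     offset & -offset isolates the lowest set bit of OFFSET and so bounds the
     alignment that can be deduced from the offset alone; for offset == 40
     (binary 101000) the result is 8, i.e. only 8-byte alignment is known.  */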
8396 constant_pool_expr_p (rtx op
)
8400 split_const (op
, &base
, &offset
);
8401 return (GET_CODE (base
) == SYMBOL_REF
8402 && CONSTANT_POOL_ADDRESS_P (base
)
8403 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base
), Pmode
));
8406 /* These are only used to pass through from print_operand/print_operand_address
8407 to rs6000_output_addr_const_extra over the intervening function
8408 output_addr_const which is not target code. */
8409 static const_rtx tocrel_base_oac
, tocrel_offset_oac
;
8411 /* Return true if OP is a toc pointer relative address (the output
8412 of create_TOC_reference). If STRICT, do not match non-split
8413 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8414 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8415 TOCREL_OFFSET_RET respectively. */
8418 toc_relative_expr_p (const_rtx op
, bool strict
, const_rtx
*tocrel_base_ret
,
8419 const_rtx
*tocrel_offset_ret
)
8424 if (TARGET_CMODEL
!= CMODEL_SMALL
)
8426 /* When strict ensure we have everything tidy. */
8428 && !(GET_CODE (op
) == LO_SUM
8429 && REG_P (XEXP (op
, 0))
8430 && INT_REG_OK_FOR_BASE_P (XEXP (op
, 0), strict
)))
8433 /* When not strict, allow non-split TOC addresses and also allow
8434 (lo_sum (high ..)) TOC addresses created during reload. */
8435 if (GET_CODE (op
) == LO_SUM
)
8439 const_rtx tocrel_base
= op
;
8440 const_rtx tocrel_offset
= const0_rtx
;
8442 if (GET_CODE (op
) == PLUS
&& add_cint_operand (XEXP (op
, 1), GET_MODE (op
)))
8444 tocrel_base
= XEXP (op
, 0);
8445 tocrel_offset
= XEXP (op
, 1);
8448 if (tocrel_base_ret
)
8449 *tocrel_base_ret
= tocrel_base
;
8450 if (tocrel_offset_ret
)
8451 *tocrel_offset_ret
= tocrel_offset
;
8453 return (GET_CODE (tocrel_base
) == UNSPEC
8454 && XINT (tocrel_base
, 1) == UNSPEC_TOCREL
);
8457 /* Return true if X is a constant pool address, and also for cmodel=medium
8458 if X is a toc-relative address known to be offsettable within MODE. */
8461 legitimate_constant_pool_address_p (const_rtx x
, machine_mode mode
,
8464 const_rtx tocrel_base
, tocrel_offset
;
8465 return (toc_relative_expr_p (x
, strict
, &tocrel_base
, &tocrel_offset
)
8466 && (TARGET_CMODEL
!= CMODEL_MEDIUM
8467 || constant_pool_expr_p (XVECEXP (tocrel_base
, 0, 0))
8469 || offsettable_ok_by_alignment (XVECEXP (tocrel_base
, 0, 0),
8470 INTVAL (tocrel_offset
), mode
)));
8474 legitimate_small_data_p (machine_mode mode
, rtx x
)
8476 return (DEFAULT_ABI
== ABI_V4
8477 && !flag_pic
&& !TARGET_TOC
8478 && (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
)
8479 && small_data_operand (x
, mode
));
8483 rs6000_legitimate_offset_address_p (machine_mode mode
, rtx x
,
8484 bool strict
, bool worst_case
)
8486 unsigned HOST_WIDE_INT offset
;
8489 if (GET_CODE (x
) != PLUS
)
8491 if (!REG_P (XEXP (x
, 0)))
8493 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8495 if (mode_supports_vsx_dform_quad (mode
))
8496 return quad_address_p (x
, mode
, strict
);
8497 if (!reg_offset_addressing_ok_p (mode
))
8498 return virtual_stack_registers_memory_p (x
);
8499 if (legitimate_constant_pool_address_p (x
, mode
, strict
|| lra_in_progress
))
8501 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
8504 offset
= INTVAL (XEXP (x
, 1));
8510 /* Paired single modes: offset addressing isn't valid. */
8516 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8518 if (VECTOR_MEM_VSX_P (mode
))
8523 if (!TARGET_POWERPC64
)
8525 else if (offset
& 3)
8538 if (!TARGET_POWERPC64
)
8540 else if (offset
& 3)
8549 return offset
< 0x10000 - extra
;
8553 legitimate_indexed_address_p (rtx x
, int strict
)
8557 if (GET_CODE (x
) != PLUS
)
8563 return (REG_P (op0
) && REG_P (op1
)
8564 && ((INT_REG_OK_FOR_BASE_P (op0
, strict
)
8565 && INT_REG_OK_FOR_INDEX_P (op1
, strict
))
8566 || (INT_REG_OK_FOR_BASE_P (op1
, strict
)
8567 && INT_REG_OK_FOR_INDEX_P (op0
, strict
))));
8571 avoiding_indexed_address_p (machine_mode mode
)
8573 /* Avoid indexed addressing for modes that have non-indexed
8574 load/store instruction forms. */
8575 return (TARGET_AVOID_XFORM
&& VECTOR_MEM_NONE_P (mode
));
8579 legitimate_indirect_address_p (rtx x
, int strict
)
8581 return GET_CODE (x
) == REG
&& INT_REG_OK_FOR_BASE_P (x
, strict
);
8585 macho_lo_sum_memory_operand (rtx x
, machine_mode mode
)
8587 if (!TARGET_MACHO
|| !flag_pic
8588 || mode
!= SImode
|| GET_CODE (x
) != MEM
)
8592 if (GET_CODE (x
) != LO_SUM
)
8594 if (GET_CODE (XEXP (x
, 0)) != REG
)
8596 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 0))
8600 return CONSTANT_P (x
);
8604 legitimate_lo_sum_address_p (machine_mode mode
, rtx x
, int strict
)
8606 if (GET_CODE (x
) != LO_SUM
)
8608 if (GET_CODE (XEXP (x
, 0)) != REG
)
8610 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8612 /* quad word addresses are restricted, and we can't use LO_SUM. */
8613 if (mode_supports_vsx_dform_quad (mode
))
8617 if (TARGET_ELF
|| TARGET_MACHO
)
8621 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
)
8623 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8624 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8625 recognizes some LO_SUM addresses as valid although this
8626 function says opposite. In most cases, LRA through different
8627 transformations can generate correct code for address reloads.
8628 It can not manage only some LO_SUM cases. So we need to add
8629 code analogous to one in rs6000_legitimize_reload_address for
8630 LOW_SUM here saying that some addresses are still valid. */
8631 large_toc_ok
= (lra_in_progress
&& TARGET_CMODEL
!= CMODEL_SMALL
8632 && small_toc_ref (x
, VOIDmode
));
8633 if (TARGET_TOC
&& ! large_toc_ok
)
8635 if (GET_MODE_NUNITS (mode
) != 1)
8637 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
8638 && !(/* ??? Assume floating point reg based on mode? */
8639 TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
8640 && (mode
== DFmode
|| mode
== DDmode
)))
8643 return CONSTANT_P (x
) || large_toc_ok
;
8650 /* Try machine-dependent ways of modifying an illegitimate address
8651 to be legitimate. If we find one, return the new, valid address.
8652 This is used from only one place: `memory_address' in explow.c.
8654 OLDX is the address as it was before break_out_memory_refs was
8655 called. In some cases it is useful to look at this to decide what
8658 It is always safe for this function to do nothing. It exists to
8659 recognize opportunities to optimize the output.
8661 On RS/6000, first check for the sum of a register with a constant
8662 integer that is out of range. If so, generate code to add the
8663 constant with the low-order 16 bits masked to the register and force
8664 this result into another register (this can be done with `cau').
8665 Then generate an address of REG+(CONST&0xffff), allowing for the
8666 possibility of bit 16 being a one.
8668 Then check for the sum of a register and something not constant, try to
8669 load the other things into a register and return the sum. */
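/* Illustrative worked example (not part of the original source): for an
   address of the form (reg + 0x12348), the code later in this function
   computes low_int = ((0x2348 ^ 0x8000) - 0x8000) = 0x2348 and
   high_int = 0x12348 - 0x2348 = 0x10000, adds the high part into a new
   register (the addis/cau step described above) and keeps reg + 0x2348 as
   the 16-bit displacement of the final address.  */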
8672 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
8677 if (!reg_offset_addressing_ok_p (mode
)
8678 || mode_supports_vsx_dform_quad (mode
))
8680 if (virtual_stack_registers_memory_p (x
))
8683 /* In theory we should not be seeing addresses of the form reg+0,
8684 but just in case it is generated, optimize it away. */
8685 if (GET_CODE (x
) == PLUS
&& XEXP (x
, 1) == const0_rtx
)
8686 return force_reg (Pmode
, XEXP (x
, 0));
8688 /* For TImode with load/store quad, restrict addresses to just a single
8689 pointer, so it works with both GPRs and VSX registers. */
8690 /* Make sure both operands are registers. */
8691 else if (GET_CODE (x
) == PLUS
8692 && (mode
!= TImode
|| !TARGET_VSX
))
8693 return gen_rtx_PLUS (Pmode
,
8694 force_reg (Pmode
, XEXP (x
, 0)),
8695 force_reg (Pmode
, XEXP (x
, 1)));
8697 return force_reg (Pmode
, x
);
8699 if (GET_CODE (x
) == SYMBOL_REF
)
8701 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
8703 return rs6000_legitimize_tls_address (x
, model
);
8715 /* As in legitimate_offset_address_p we do not assume
8716 worst-case. The mode here is just a hint as to the registers
8717 used. A TImode is usually in gprs, but may actually be in
8718 fprs. Leave worst-case scenario for reload to handle via
8719 insn constraints. PTImode is only GPRs. */
8726 if (GET_CODE (x
) == PLUS
8727 && GET_CODE (XEXP (x
, 0)) == REG
8728 && GET_CODE (XEXP (x
, 1)) == CONST_INT
8729 && ((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000)
8731 && !PAIRED_VECTOR_MODE (mode
))
8733 HOST_WIDE_INT high_int
, low_int
;
8735 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8736 if (low_int
>= 0x8000 - extra
)
8738 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
8739 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8740 GEN_INT (high_int
)), 0);
8741 return plus_constant (Pmode
, sum
, low_int
);
8743 else if (GET_CODE (x
) == PLUS
8744 && GET_CODE (XEXP (x
, 0)) == REG
8745 && GET_CODE (XEXP (x
, 1)) != CONST_INT
8746 && GET_MODE_NUNITS (mode
) == 1
8747 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8748 || (/* ??? Assume floating point reg based on mode? */
8749 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8750 && (mode
== DFmode
|| mode
== DDmode
)))
8751 && !avoiding_indexed_address_p (mode
))
8753 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8754 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
8756 else if (PAIRED_VECTOR_MODE (mode
))
8760 /* We accept [reg + reg]. */
8762 if (GET_CODE (x
) == PLUS
)
8764 rtx op1
= XEXP (x
, 0);
8765 rtx op2
= XEXP (x
, 1);
8768 op1
= force_reg (Pmode
, op1
);
8769 op2
= force_reg (Pmode
, op2
);
8771 /* We can't always do [reg + reg] for these, because [reg +
8772 reg + offset] is not a legitimate addressing mode. */
8773 y
= gen_rtx_PLUS (Pmode
, op1
, op2
);
8775 if ((GET_MODE_SIZE (mode
) > 8 || mode
== DDmode
) && REG_P (op2
))
8776 return force_reg (Pmode
, y
);
8781 return force_reg (Pmode
, x
);
8783 else if ((TARGET_ELF
8785 || !MACHO_DYNAMIC_NO_PIC_P
8791 && GET_CODE (x
) != CONST_INT
8792 && GET_CODE (x
) != CONST_WIDE_INT
8793 && GET_CODE (x
) != CONST_DOUBLE
8795 && GET_MODE_NUNITS (mode
) == 1
8796 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8797 || (/* ??? Assume floating point reg based on mode? */
8798 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8799 && (mode
== DFmode
|| mode
== DDmode
))))
8801 rtx reg
= gen_reg_rtx (Pmode
);
8803 emit_insn (gen_elf_high (reg
, x
));
8805 emit_insn (gen_macho_high (reg
, x
));
8806 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
8809 && GET_CODE (x
) == SYMBOL_REF
8810 && constant_pool_expr_p (x
)
8811 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
8812 return create_TOC_reference (x
, NULL_RTX
);
8817 /* Debug version of rs6000_legitimize_address. */
8819 rs6000_debug_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
)
8825 ret
= rs6000_legitimize_address (x
, oldx
, mode
);
8826 insns
= get_insns ();
8832 "\nrs6000_legitimize_address: mode %s, old code %s, "
8833 "new code %s, modified\n",
8834 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)),
8835 GET_RTX_NAME (GET_CODE (ret
)));
8837 fprintf (stderr
, "Original address:\n");
8840 fprintf (stderr
, "oldx:\n");
8843 fprintf (stderr
, "New address:\n");
8848 fprintf (stderr
, "Insns added:\n");
8849 debug_rtx_list (insns
, 20);
8855 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8856 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)));
8867 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8868 We need to emit DTP-relative relocations. */
8870 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
8872 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
8877 fputs ("\t.long\t", file
);
8880 fputs (DOUBLE_INT_ASM_OP
, file
);
8885 output_addr_const (file
, x
);
8887 fputs ("@dtprel+0x8000", file
);
8888 else if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
)
8890 switch (SYMBOL_REF_TLS_MODEL (x
))
8894 case TLS_MODEL_LOCAL_EXEC
:
8895 fputs ("@le", file
);
8897 case TLS_MODEL_INITIAL_EXEC
:
8898 fputs ("@ie", file
);
8900 case TLS_MODEL_GLOBAL_DYNAMIC
:
8901 case TLS_MODEL_LOCAL_DYNAMIC
:
8910 /* Return true if X is a symbol that refers to real (rather than emulated)
8914 rs6000_real_tls_symbol_ref_p (rtx x
)
8916 return (GET_CODE (x
) == SYMBOL_REF
8917 && SYMBOL_REF_TLS_MODEL (x
) >= TLS_MODEL_REAL
);
8920 /* In the name of slightly smaller debug output, and to cater to
8921 general assembler lossage, recognize various UNSPEC sequences
8922 and turn them back into a direct symbol reference. */
8925 rs6000_delegitimize_address (rtx orig_x
)
8929 orig_x
= delegitimize_mem_from_attrs (orig_x
);
8935 if (TARGET_CMODEL
!= CMODEL_SMALL
8936 && GET_CODE (y
) == LO_SUM
)
8940 if (GET_CODE (y
) == PLUS
8941 && GET_MODE (y
) == Pmode
8942 && CONST_INT_P (XEXP (y
, 1)))
8944 offset
= XEXP (y
, 1);
8948 if (GET_CODE (y
) == UNSPEC
8949 && XINT (y
, 1) == UNSPEC_TOCREL
)
8951 y
= XVECEXP (y
, 0, 0);
8954 /* Do not associate thread-local symbols with the original
8955 constant pool symbol. */
8957 && GET_CODE (y
) == SYMBOL_REF
8958 && CONSTANT_POOL_ADDRESS_P (y
)
8959 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y
)))
8963 if (offset
!= NULL_RTX
)
8964 y
= gen_rtx_PLUS (Pmode
, y
, offset
);
8965 if (!MEM_P (orig_x
))
8968 return replace_equiv_address_nv (orig_x
, y
);
8972 && GET_CODE (orig_x
) == LO_SUM
8973 && GET_CODE (XEXP (orig_x
, 1)) == CONST
)
8975 y
= XEXP (XEXP (orig_x
, 1), 0);
8976 if (GET_CODE (y
) == UNSPEC
8977 && XINT (y
, 1) == UNSPEC_MACHOPIC_OFFSET
)
8978 return XVECEXP (y
, 0, 0);
8984 /* Return true if X shouldn't be emitted into the debug info.
8985 The linker doesn't like .toc section references from
8986 .debug_* sections, so reject .toc section symbols. */
8989 rs6000_const_not_ok_for_debug_p (rtx x
)
8991 if (GET_CODE (x
) == UNSPEC
)
8993 if (GET_CODE (x
) == SYMBOL_REF
8994 && CONSTANT_POOL_ADDRESS_P (x
))
8996 rtx c
= get_pool_constant (x
);
8997 machine_mode cmode
= get_pool_mode (x
);
8998 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c
, cmode
))
9006 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9009 rs6000_legitimate_combined_insn (rtx_insn
*insn
)
9011 int icode
= INSN_CODE (insn
);
9013 /* Reject creating doloop insns. Combine should not be allowed
9014 to create these for a number of reasons:
9015 1) In a nested loop, if combine creates one of these in an
9016 outer loop and the register allocator happens to allocate ctr
9017 to the outer loop insn, then the inner loop can't use ctr.
9018 Inner loops ought to be more highly optimized.
9019 2) Combine often wants to create one of these from what was
9020 originally a three insn sequence, first combining the three
9021 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9022 allocated ctr, the splitter takes use back to the three insn
9023 sequence. It's better to stop combine at the two insn
9025 3) Faced with not being able to allocate ctr for ctrsi/crtdi
9026 insns, the register allocator sometimes uses floating point
9027 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9028 jump insn and output reloads are not implemented for jumps,
9029 the ctrsi/ctrdi splitters need to handle all possible cases.
9030 That's a pain, and it gets to be seriously difficult when a
9031 splitter that runs after reload needs memory to transfer from
9032 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9033 for the difficult case. It's better to not create problems
9034 in the first place. */
9035 if (icode
!= CODE_FOR_nothing
9036 && (icode
== CODE_FOR_ctrsi_internal1
9037 || icode
== CODE_FOR_ctrdi_internal1
9038 || icode
== CODE_FOR_ctrsi_internal2
9039 || icode
== CODE_FOR_ctrdi_internal2
))
9045 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9047 static GTY(()) rtx rs6000_tls_symbol
;
9049 rs6000_tls_get_addr (void)
9051 if (!rs6000_tls_symbol
)
9052 rs6000_tls_symbol
= init_one_libfunc ("__tls_get_addr");
9054 return rs6000_tls_symbol
;
9057 /* Construct the SYMBOL_REF for TLS GOT references. */
9059 static GTY(()) rtx rs6000_got_symbol
;
9061 rs6000_got_sym (void)
9063 if (!rs6000_got_symbol
)
9065 rs6000_got_symbol
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
9066 SYMBOL_REF_FLAGS (rs6000_got_symbol
) |= SYMBOL_FLAG_LOCAL
;
9067 SYMBOL_REF_FLAGS (rs6000_got_symbol
) |= SYMBOL_FLAG_EXTERNAL
;
9070 return rs6000_got_symbol
;
9073 /* AIX Thread-Local Address support. */
9076 rs6000_legitimize_tls_address_aix (rtx addr
, enum tls_model model
)
9078 rtx sym
, mem
, tocref
, tlsreg
, tmpreg
, dest
, tlsaddr
;
9082 name
= XSTR (addr
, 0);
9083 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9084 or the symbol will be in TLS private data section. */
9085 if (name
[strlen (name
) - 1] != ']'
9086 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr
))
9087 || bss_initializer_p (SYMBOL_REF_DECL (addr
))))
9089 tlsname
= XALLOCAVEC (char, strlen (name
) + 4);
9090 strcpy (tlsname
, name
);
9092 bss_initializer_p (SYMBOL_REF_DECL (addr
)) ? "[UL]" : "[TL]");
9093 tlsaddr
= copy_rtx (addr
);
9094 XSTR (tlsaddr
, 0) = ggc_strdup (tlsname
);
9099 /* Place addr into TOC constant pool. */
9100 sym
= force_const_mem (GET_MODE (tlsaddr
), tlsaddr
);
9102 /* Output the TOC entry and create the MEM referencing the value. */
9103 if (constant_pool_expr_p (XEXP (sym
, 0))
9104 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym
, 0)), Pmode
))
9106 tocref
= create_TOC_reference (XEXP (sym
, 0), NULL_RTX
);
9107 mem
= gen_const_mem (Pmode
, tocref
);
9108 set_mem_alias_set (mem
, get_TOC_alias_set ());
9113 /* Use global-dynamic for local-dynamic. */
9114 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
9115 || model
== TLS_MODEL_LOCAL_DYNAMIC
)
9117 /* Create new TOC reference for @m symbol. */
9118 name
= XSTR (XVECEXP (XEXP (mem
, 0), 0, 0), 0);
9119 tlsname
= XALLOCAVEC (char, strlen (name
) + 1);
9120 strcpy (tlsname
, "*LCM");
9121 strcat (tlsname
, name
+ 3);
9122 rtx modaddr
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (tlsname
));
9123 SYMBOL_REF_FLAGS (modaddr
) |= SYMBOL_FLAG_LOCAL
;
9124 tocref
= create_TOC_reference (modaddr
, NULL_RTX
);
9125 rtx modmem
= gen_const_mem (Pmode
, tocref
);
9126 set_mem_alias_set (modmem
, get_TOC_alias_set ());
9128 rtx modreg
= gen_reg_rtx (Pmode
);
9129 emit_insn (gen_rtx_SET (modreg
, modmem
));
9131 tmpreg
= gen_reg_rtx (Pmode
);
9132 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9134 dest
= gen_reg_rtx (Pmode
);
9136 emit_insn (gen_tls_get_addrsi (dest
, modreg
, tmpreg
));
9138 emit_insn (gen_tls_get_addrdi (dest
, modreg
, tmpreg
));
9141 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9142 else if (TARGET_32BIT
)
9144 tlsreg
= gen_reg_rtx (SImode
);
9145 emit_insn (gen_tls_get_tpointer (tlsreg
));
9148 tlsreg
= gen_rtx_REG (DImode
, 13);
9150 /* Load the TOC value into temporary register. */
9151 tmpreg
= gen_reg_rtx (Pmode
);
9152 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9153 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
9154 gen_rtx_MINUS (Pmode
, addr
, tlsreg
));
9156 /* Add TOC symbol value to TLS pointer. */
9157 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, tmpreg
, tlsreg
));
9162 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9163 this (thread-local) address. */
9166 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
9171 return rs6000_legitimize_tls_address_aix (addr
, model
);
9173 dest
= gen_reg_rtx (Pmode
);
9174 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
9180 tlsreg
= gen_rtx_REG (Pmode
, 13);
9181 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
9185 tlsreg
= gen_rtx_REG (Pmode
, 2);
9186 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
9190 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
9194 tmp
= gen_reg_rtx (Pmode
);
9197 tlsreg
= gen_rtx_REG (Pmode
, 13);
9198 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
9202 tlsreg
= gen_rtx_REG (Pmode
, 2);
9203 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
9207 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
9209 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
9214 rtx r3
, got
, tga
, tmp1
, tmp2
, call_insn
;
9216 /* We currently use relocations like @got@tlsgd for tls, which
9217 means the linker will handle allocation of tls entries, placing
9218 them in the .got section. So use a pointer to the .got section,
9219 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9220 or to secondary GOT sections used by 32-bit -fPIC. */
9222 got
= gen_rtx_REG (Pmode
, 2);
9226 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
9229 rtx gsym
= rs6000_got_sym ();
9230 got
= gen_reg_rtx (Pmode
);
9232 rs6000_emit_move (got
, gsym
, Pmode
);
9237 tmp1
= gen_reg_rtx (Pmode
);
9238 tmp2
= gen_reg_rtx (Pmode
);
9239 mem
= gen_const_mem (Pmode
, tmp1
);
9240 lab
= gen_label_rtx ();
9241 emit_insn (gen_load_toc_v4_PIC_1b (gsym
, lab
));
9242 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
9243 if (TARGET_LINK_STACK
)
9244 emit_insn (gen_addsi3 (tmp1
, tmp1
, GEN_INT (4)));
9245 emit_move_insn (tmp2
, mem
);
9246 rtx_insn
*last
= emit_insn (gen_addsi3 (got
, tmp1
, tmp2
));
9247 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
9252 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
9254 tga
= rs6000_tls_get_addr ();
9255 emit_library_call_value (tga
, dest
, LCT_CONST
, Pmode
,
9258 r3
= gen_rtx_REG (Pmode
, 3);
9259 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9262 insn
= gen_tls_gd_aix64 (r3
, got
, addr
, tga
, const0_rtx
);
9264 insn
= gen_tls_gd_aix32 (r3
, got
, addr
, tga
, const0_rtx
);
9266 else if (DEFAULT_ABI
== ABI_V4
)
9267 insn
= gen_tls_gd_sysvsi (r3
, got
, addr
, tga
, const0_rtx
);
9270 call_insn
= last_call_insn ();
9271 PATTERN (call_insn
) = insn
;
9272 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9273 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9274 pic_offset_table_rtx
);
9276 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
9278 tga
= rs6000_tls_get_addr ();
9279 tmp1
= gen_reg_rtx (Pmode
);
9280 emit_library_call_value (tga
, tmp1
, LCT_CONST
, Pmode
,
9283 r3
= gen_rtx_REG (Pmode
, 3);
9284 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9287 insn
= gen_tls_ld_aix64 (r3
, got
, tga
, const0_rtx
);
9289 insn
= gen_tls_ld_aix32 (r3
, got
, tga
, const0_rtx
);
9291 else if (DEFAULT_ABI
== ABI_V4
)
9292 insn
= gen_tls_ld_sysvsi (r3
, got
, tga
, const0_rtx
);
9295 call_insn
= last_call_insn ();
9296 PATTERN (call_insn
) = insn
;
9297 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9298 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9299 pic_offset_table_rtx
);
9301 if (rs6000_tls_size
== 16)
9304 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
9306 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
9308 else if (rs6000_tls_size
== 32)
9310 tmp2
= gen_reg_rtx (Pmode
);
9312 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
9314 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
9317 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
9319 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
9323 tmp2
= gen_reg_rtx (Pmode
);
9325 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
9327 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
9329 insn
= gen_rtx_SET (dest
, gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
9335 /* IE, or 64-bit offset LE. */
9336 tmp2
= gen_reg_rtx (Pmode
);
9338 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
9340 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
9343 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
9345 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
9353 /* Only create the global variable for the stack protect guard if we are using
9354 the global flavor of that guard. */
9356 rs6000_init_stack_protect_guard (void)
9358 if (rs6000_stack_protector_guard
== SSP_GLOBAL
)
9359 return default_stack_protect_guard ();
9364 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9367 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
9369 if (GET_CODE (x
) == HIGH
9370 && GET_CODE (XEXP (x
, 0)) == UNSPEC
)
9373 /* A TLS symbol in the TOC cannot contain a sum. */
9374 if (GET_CODE (x
) == CONST
9375 && GET_CODE (XEXP (x
, 0)) == PLUS
9376 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
9377 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x
, 0), 0)) != 0)
9380 /* Do not place an ELF TLS symbol in the constant pool. */
9381 return TARGET_ELF
&& tls_referenced_p (x
);
9384 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9385 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9386 can be addressed relative to the toc pointer. */
9389 use_toc_relative_ref (rtx sym
, machine_mode mode
)
9391 return ((constant_pool_expr_p (sym
)
9392 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym
),
9393 get_pool_mode (sym
)))
9394 || (TARGET_CMODEL
== CMODEL_MEDIUM
9395 && SYMBOL_REF_LOCAL_P (sym
)
9396 && GET_MODE_SIZE (mode
) <= POWERPC64_TOC_POINTER_ALIGNMENT
));
9399 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9400 replace the input X, or the original X if no replacement is called for.
9401 The output parameter *WIN is 1 if the calling macro should goto WIN,
9404 For RS/6000, we wish to handle large displacements off a base
9405 register by splitting the addend across an addiu/addis and the mem insn.
9406 This cuts number of extra insns needed from 3 to 1.
9408 On Darwin, we use this to generate code for floating point constants.
9409 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9410 The Darwin code is inside #if TARGET_MACHO because only then are the
9411 machopic_* functions defined. */
9413 rs6000_legitimize_reload_address (rtx x
, machine_mode mode
,
9414 int opnum
, int type
,
9415 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
9417 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9418 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9420 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9421 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9424 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
9425 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)
9426 || (mode
== SFmode
&& recog_data
.operand_mode
[0] == V4SFmode
9427 && TARGET_P9_VECTOR
)
9428 || (mode
== SImode
&& recog_data
.operand_mode
[0] == V4SImode
9429 && TARGET_P9_VECTOR
)))
9430 reg_offset_p
= false;
9432 /* We must recognize output that we have already generated ourselves. */
9433 if (GET_CODE (x
) == PLUS
9434 && GET_CODE (XEXP (x
, 0)) == PLUS
9435 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9436 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9437 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9439 if (TARGET_DEBUG_ADDR
)
9441 fprintf (stderr
, "\nlegitimize_reload_address push_reload #1:\n");
9444 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9445 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9446 opnum
, (enum reload_type
) type
);
9451 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9452 if (GET_CODE (x
) == LO_SUM
9453 && GET_CODE (XEXP (x
, 0)) == HIGH
)
9455 if (TARGET_DEBUG_ADDR
)
9457 fprintf (stderr
, "\nlegitimize_reload_address push_reload #2:\n");
9460 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9461 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9462 opnum
, (enum reload_type
) type
);
9468 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
9469 && GET_CODE (x
) == LO_SUM
9470 && GET_CODE (XEXP (x
, 0)) == PLUS
9471 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
9472 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
9473 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
9474 && machopic_operand_p (XEXP (x
, 1)))
9476 /* Result of previous invocation of this function on Darwin
9477 floating point constant. */
9478 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9479 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9480 opnum
, (enum reload_type
) type
);
9486 if (TARGET_CMODEL
!= CMODEL_SMALL
9489 && small_toc_ref (x
, VOIDmode
))
9491 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
9492 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
9493 if (TARGET_DEBUG_ADDR
)
9495 fprintf (stderr
, "\nlegitimize_reload_address push_reload #3:\n");
9498 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9499 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9500 opnum
, (enum reload_type
) type
);
9505 if (GET_CODE (x
) == PLUS
9506 && REG_P (XEXP (x
, 0))
9507 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
9508 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
9509 && CONST_INT_P (XEXP (x
, 1))
9511 && !PAIRED_VECTOR_MODE (mode
)
9512 && (quad_offset_p
|| !VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
)))
9514 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
9515 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
9517 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9519 /* Check for 32-bit overflow or quad addresses with one of the
9520 four least significant bits set. */
9521 if (high
+ low
!= val
9522 || (quad_offset_p
&& (low
& 0xf)))
9528 /* Reload the high part into a base reg; leave the low part
9529 in the mem directly. */
9531 x
= gen_rtx_PLUS (GET_MODE (x
),
9532 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
9536 if (TARGET_DEBUG_ADDR
)
9538 fprintf (stderr
, "\nlegitimize_reload_address push_reload #4:\n");
9541 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9542 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9543 opnum
, (enum reload_type
) type
);
9548 if (GET_CODE (x
) == SYMBOL_REF
9551 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
))
9552 && !PAIRED_VECTOR_MODE (mode
)
9554 && DEFAULT_ABI
== ABI_DARWIN
9555 && (flag_pic
|| MACHO_DYNAMIC_NO_PIC_P
)
9556 && machopic_symbol_defined_p (x
)
9558 && DEFAULT_ABI
== ABI_V4
9561 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9562 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9564 ??? Assume floating point reg based on mode? This assumption is
9565 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9566 where reload ends up doing a DFmode load of a constant from
9567 mem using two gprs. Unfortunately, at this point reload
9568 hasn't yet selected regs so poking around in reload data
9569 won't help and even if we could figure out the regs reliably,
9570 we'd still want to allow this transformation when the mem is
9571 naturally aligned. Since we say the address is good here, we
9572 can't disable offsets from LO_SUMs in mem_operand_gpr.
9573 FIXME: Allow offset from lo_sum for other modes too, when
9574 mem is sufficiently aligned.
9576 Also disallow this if the type can go in VMX/Altivec registers, since
9577 those registers do not have d-form (reg+offset) address modes. */
9578 && !reg_addr
[mode
].scalar_in_vmx_p
9583 && (mode
!= TImode
|| !TARGET_VSX
)
9585 && (mode
!= DImode
|| TARGET_POWERPC64
)
9586 && ((mode
!= DFmode
&& mode
!= DDmode
) || TARGET_POWERPC64
9587 || (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)))
9592 rtx offset
= machopic_gen_offset (x
);
9593 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9594 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
9595 gen_rtx_HIGH (Pmode
, offset
)), offset
);
9599 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9600 gen_rtx_HIGH (Pmode
, x
), x
);
9602 if (TARGET_DEBUG_ADDR
)
9604 fprintf (stderr
, "\nlegitimize_reload_address push_reload #5:\n");
9607 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9608 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9609 opnum
, (enum reload_type
) type
);
9614 /* Reload an offset address wrapped by an AND that represents the
9615 masking of the lower bits. Strip the outer AND and let reload
9616 convert the offset address into an indirect address. For VSX,
9617 force reload to create the address with an AND in a separate
9618 register, because we can't guarantee an altivec register will
9620 if (VECTOR_MEM_ALTIVEC_P (mode
)
9621 && GET_CODE (x
) == AND
9622 && GET_CODE (XEXP (x
, 0)) == PLUS
9623 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9624 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9625 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9626 && INTVAL (XEXP (x
, 1)) == -16)
9636 && GET_CODE (x
) == SYMBOL_REF
9637 && use_toc_relative_ref (x
, mode
))
9639 x
= create_TOC_reference (x
, NULL_RTX
);
9640 if (TARGET_CMODEL
!= CMODEL_SMALL
)
9642 if (TARGET_DEBUG_ADDR
)
9644 fprintf (stderr
, "\nlegitimize_reload_address push_reload #6:\n");
9647 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9648 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9649 opnum
, (enum reload_type
) type
);
9658 /* Debug version of rs6000_legitimize_reload_address. */
9660 rs6000_debug_legitimize_reload_address (rtx x
, machine_mode mode
,
9661 int opnum
, int type
,
9662 int ind_levels
, int *win
)
9664 rtx ret
= rs6000_legitimize_reload_address (x
, mode
, opnum
, type
,
9667 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9668 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9669 GET_MODE_NAME (mode
), opnum
, type
, ind_levels
, *win
);
9673 fprintf (stderr
, "Same address returned\n");
9675 fprintf (stderr
, "NULL returned\n");
9678 fprintf (stderr
, "New address:\n");
9685 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9686 that is a valid memory address for an instruction.
9687 The MODE argument is the machine mode for the MEM expression
9688 that wants to use this address.
9690 On the RS/6000, there are four valid address: a SYMBOL_REF that
9691 refers to a constant pool entry of an address (or the sum of it
9692 plus a constant), a short (16-bit signed) constant plus a register,
9693 the sum of two registers, or a register indirect, possibly with an
9694 auto-increment. For DFmode, DDmode and DImode with a constant plus
9695 register, we must ensure that both words are addressable or PowerPC64
9696 with offset word aligned.
9698 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9699 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9700 because adjacent memory cells are accessed by adding word-sized offsets
9701 during assembly output. */
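/* Illustrative examples (not part of the original source) of the forms listed
   above:  (reg rN) is register indirect;  (plus (reg rN) (const_int 16)) is
   the short-constant-plus-register form;  (plus (reg rN) (reg rM)) is the
   sum of two registers;  and a TOC-relative SYMBOL_REF is used for a
   constant pool entry.  */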
9703 rs6000_legitimate_address_p (machine_mode mode
, rtx x
, bool reg_ok_strict
)
9705 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9706 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9708 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9709 if (VECTOR_MEM_ALTIVEC_P (mode
)
9710 && GET_CODE (x
) == AND
9711 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9712 && INTVAL (XEXP (x
, 1)) == -16)
9715 if (TARGET_ELF
&& RS6000_SYMBOL_REF_TLS_P (x
))
9717 if (legitimate_indirect_address_p (x
, reg_ok_strict
))
9720 && (GET_CODE (x
) == PRE_INC
|| GET_CODE (x
) == PRE_DEC
)
9721 && mode_supports_pre_incdec_p (mode
)
9722 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
))
9724 /* Handle restricted vector d-form offsets in ISA 3.0. */
9727 if (quad_address_p (x
, mode
, reg_ok_strict
))
9730 else if (virtual_stack_registers_memory_p (x
))
9733 else if (reg_offset_p
)
9735 if (legitimate_small_data_p (mode
, x
))
9737 if (legitimate_constant_pool_address_p (x
, mode
,
9738 reg_ok_strict
|| lra_in_progress
))
9740 if (reg_addr
[mode
].fused_toc
&& GET_CODE (x
) == UNSPEC
9741 && XINT (x
, 1) == UNSPEC_FUSION_ADDIS
)
9745 /* For TImode, if we have TImode in VSX registers, only allow register
9746 indirect addresses. This will allow the values to go in either GPRs
9747 or VSX registers without reloading. The vector types would tend to
9748 go into VSX registers, so we allow REG+REG, while TImode seems
9749 somewhat split, in that some uses are GPR based, and some VSX based. */
9750 /* FIXME: We could loosen this by changing the following to
9751 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9752 but currently we cannot allow REG+REG addressing for TImode. See
9753 PR72827 for complete details on how this ends up hoodwinking DSE. */
9754 if (mode
== TImode
&& TARGET_VSX
)
9756 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9759 && GET_CODE (x
) == PLUS
9760 && GET_CODE (XEXP (x
, 0)) == REG
9761 && (XEXP (x
, 0) == virtual_stack_vars_rtx
9762 || XEXP (x
, 0) == arg_pointer_rtx
)
9763 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9765 if (rs6000_legitimate_offset_address_p (mode
, x
, reg_ok_strict
, false))
9767 if (!FLOAT128_2REG_P (mode
)
9768 && ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
9770 || (mode
!= DFmode
&& mode
!= DDmode
))
9771 && (TARGET_POWERPC64
|| mode
!= DImode
)
9772 && (mode
!= TImode
|| VECTOR_MEM_VSX_P (TImode
))
9774 && !avoiding_indexed_address_p (mode
)
9775 && legitimate_indexed_address_p (x
, reg_ok_strict
))
9777 if (TARGET_UPDATE
&& GET_CODE (x
) == PRE_MODIFY
9778 && mode_supports_pre_modify_p (mode
)
9779 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
)
9780 && (rs6000_legitimate_offset_address_p (mode
, XEXP (x
, 1),
9781 reg_ok_strict
, false)
9782 || (!avoiding_indexed_address_p (mode
)
9783 && legitimate_indexed_address_p (XEXP (x
, 1), reg_ok_strict
)))
9784 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
9786 if (reg_offset_p
&& !quad_offset_p
9787 && legitimate_lo_sum_address_p (mode
, x
, reg_ok_strict
))
9792 /* Debug version of rs6000_legitimate_address_p. */
9794 rs6000_debug_legitimate_address_p (machine_mode mode
, rtx x
,
9797 bool ret
= rs6000_legitimate_address_p (mode
, x
, reg_ok_strict
);
9799 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9800 "strict = %d, reload = %s, code = %s\n",
9801 ret
? "true" : "false",
9802 GET_MODE_NAME (mode
),
9804 (reload_completed
? "after" : "before"),
9805 GET_RTX_NAME (GET_CODE (x
)));
9811 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9814 rs6000_mode_dependent_address_p (const_rtx addr
,
9815 addr_space_t as ATTRIBUTE_UNUSED
)
9817 return rs6000_mode_dependent_address_ptr (addr
);
9820 /* Go to LABEL if ADDR (a legitimate address expression)
9821 has an effect that depends on the machine mode it is used for.
9823 On the RS/6000 this is true of all integral offsets (since AltiVec
9824 and VSX modes don't allow them) or is a pre-increment or decrement.
9826 ??? Except that due to conceptual problems in offsettable_address_p
9827 we can't really report the problems of integral offsets. So leave
9828 this assuming that the adjustable offset must be valid for the
9829 sub-words of a TFmode operand, which is what we had before. */
9832 rs6000_mode_dependent_address (const_rtx addr
)
9834 switch (GET_CODE (addr
))
9837 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9838 is considered a legitimate address before reload, so there
9839 are no offset restrictions in that case. Note that this
9840 condition is safe in strict mode because any address involving
9841 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9842 been rejected as illegitimate. */
9843 if (XEXP (addr
, 0) != virtual_stack_vars_rtx
9844 && XEXP (addr
, 0) != arg_pointer_rtx
9845 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
9847 unsigned HOST_WIDE_INT val
= INTVAL (XEXP (addr
, 1));
9848 return val
+ 0x8000 >= 0x10000 - (TARGET_POWERPC64
? 8 : 12);
9853 /* Anything in the constant pool is sufficiently aligned that
9854 all bytes have the same high part address. */
9855 return !legitimate_constant_pool_address_p (addr
, QImode
, false);
9857 /* Auto-increment cases are now treated generically in recog.c. */
9859 return TARGET_UPDATE
;
9861 /* AND is only allowed in Altivec loads. */
9872 /* Debug version of rs6000_mode_dependent_address. */
9874 rs6000_debug_mode_dependent_address (const_rtx addr
)
9876 bool ret
= rs6000_mode_dependent_address (addr
);
9878 fprintf (stderr
, "\nrs6000_mode_dependent_address: ret = %s\n",
9879 ret
? "true" : "false");
9885 /* Implement FIND_BASE_TERM. */
9888 rs6000_find_base_term (rtx op
)
9893 if (GET_CODE (base
) == CONST
)
9894 base
= XEXP (base
, 0);
9895 if (GET_CODE (base
) == PLUS
)
9896 base
= XEXP (base
, 0);
9897 if (GET_CODE (base
) == UNSPEC
)
9898 switch (XINT (base
, 1))
9901 case UNSPEC_MACHOPIC_OFFSET
:
9902 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9903 for aliasing purposes. */
9904 return XVECEXP (base
, 0, 0);
9910 /* More elaborate version of recog's offsettable_memref_p predicate
9911 that works around the ??? note of rs6000_mode_dependent_address.
9912 In particular it accepts
9914 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9916 in 32-bit mode, that the recog predicate rejects. */
9919 rs6000_offsettable_memref_p (rtx op
, machine_mode reg_mode
)
9926 /* First mimic offsettable_memref_p. */
9927 if (offsettable_address_p (true, GET_MODE (op
), XEXP (op
, 0)))
9930 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9931 the latter predicate knows nothing about the mode of the memory
9932 reference and, therefore, assumes that it is the largest supported
9933 mode (TFmode). As a consequence, legitimate offsettable memory
9934 references are rejected. rs6000_legitimate_offset_address_p contains
9935 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9936 at least with a little bit of help here given that we know the
9937 actual registers used. */
9938 worst_case
= ((TARGET_POWERPC64
&& GET_MODE_CLASS (reg_mode
) == MODE_INT
)
9939 || GET_MODE_SIZE (reg_mode
) == 4);
9940 return rs6000_legitimate_offset_address_p (GET_MODE (op
), XEXP (op
, 0),
9944 /* Determine the reassociation width to be used in reassociate_bb.
9945 This takes into account how many parallel operations we
9946 can actually do of a given type, and also the latency.
9950 vect add/sub/mul 2/cycle
9951 fp add/sub/mul 2/cycle
9956 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED
,
9961 case PROCESSOR_POWER8
:
9962 case PROCESSOR_POWER9
:
9963 if (DECIMAL_FLOAT_MODE_P (mode
))
9965 if (VECTOR_MODE_P (mode
))
9967 if (INTEGRAL_MODE_P (mode
))
9968 return opc
== MULT_EXPR
? 4 : 6;
9969 if (FLOAT_MODE_P (mode
))
9978 /* Change register usage conditional on target flags. */
9980 rs6000_conditional_register_usage (void)
9984 if (TARGET_DEBUG_TARGET
)
9985 fprintf (stderr
, "rs6000_conditional_register_usage called\n");
9987 /* Set MQ register fixed (already call_used) so that it will not be
9991 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9993 fixed_regs
[13] = call_used_regs
[13]
9994 = call_really_used_regs
[13] = 1;
9996 /* Conditionally disable FPRs. */
9997 if (TARGET_SOFT_FLOAT
)
9998 for (i
= 32; i
< 64; i
++)
9999 fixed_regs
[i
] = call_used_regs
[i
]
10000 = call_really_used_regs
[i
] = 1;
10002 /* The TOC register is not killed across calls in a way that is
10003 visible to the compiler. */
10004 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
10005 call_really_used_regs
[2] = 0;
10007 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
10008 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10010 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
10011 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10012 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10013 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10015 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
10016 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10017 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10018 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10020 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
)
10021 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10022 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10024 if (!TARGET_ALTIVEC
&& !TARGET_VSX
)
10026 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
10027 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10028 call_really_used_regs
[VRSAVE_REGNO
] = 1;
10031 if (TARGET_ALTIVEC
|| TARGET_VSX
)
10032 global_regs
[VSCR_REGNO
] = 1;
10034 if (TARGET_ALTIVEC_ABI
)
10036 for (i
= FIRST_ALTIVEC_REGNO
; i
< FIRST_ALTIVEC_REGNO
+ 20; ++i
)
10037 call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10039 /* AIX reserves VR20:31 in non-extended ABI mode. */
10041 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
< FIRST_ALTIVEC_REGNO
+ 32; ++i
)
10042 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10047 /* Output insns to set DEST equal to the constant SOURCE as a series of
10048 lis, ori and shl instructions and return TRUE. */
10051 rs6000_emit_set_const (rtx dest
, rtx source
)
10053 machine_mode mode
= GET_MODE (dest
);
10058 gcc_checking_assert (CONST_INT_P (source
));
10059 c
= INTVAL (source
);
10064 emit_insn (gen_rtx_SET (dest
, source
));
10068 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (SImode
);
10070 emit_insn (gen_rtx_SET (copy_rtx (temp
),
10071 GEN_INT (c
& ~(HOST_WIDE_INT
) 0xffff)));
10072 emit_insn (gen_rtx_SET (dest
,
10073 gen_rtx_IOR (SImode
, copy_rtx (temp
),
10074 GEN_INT (c
& 0xffff))));
10078 if (!TARGET_POWERPC64
)
10082 hi
= operand_subword_force (copy_rtx (dest
), WORDS_BIG_ENDIAN
== 0,
10084 lo
= operand_subword_force (dest
, WORDS_BIG_ENDIAN
!= 0,
10086 emit_move_insn (hi
, GEN_INT (c
>> 32));
10087 c
= ((c
& 0xffffffff) ^ 0x80000000) - 0x80000000;
10088 emit_move_insn (lo
, GEN_INT (c
));
10091 rs6000_emit_set_long_const (dest
, c
);
10095 gcc_unreachable ();
10098 insn
= get_last_insn ();
10099 set
= single_set (insn
);
10100 if (! CONSTANT_P (SET_SRC (set
)))
10101 set_unique_reg_note (insn
, REG_EQUAL
, GEN_INT (c
));
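/* Illustrative sketch (not part of the original sources): the SImode path
   above materializes a 32-bit constant the way "lis; ori" would -- high
   sixteen bits first, then OR in the low sixteen -- and the DImode helper
   below does the same with four 16-bit chunks plus a 32-bit shift.  A
   standalone model of that arithmetic, with hypothetical names: */
#if 0
#include <stdint.h>

static uint32_t
demo_build_si_const (uint32_t c)
{
  uint32_t hi = c & ~0xffffu;	/* what the first SET loads (lis) */
  uint32_t lo = c & 0xffffu;	/* what the IOR adds in (ori)     */
  return hi | lo;		/* equals c for every input       */
}

static uint64_t
demo_build_di_const (uint64_t c)
{
  uint64_t ud1 = c & 0xffff;		/* lowest halfword  */
  uint64_t ud2 = (c >> 16) & 0xffff;
  uint64_t ud3 = (c >> 32) & 0xffff;
  uint64_t ud4 = (c >> 48) & 0xffff;	/* highest halfword */

  /* Worst case: build the high 32 bits, shift them up by 32, then patch in
     the two low halfwords.  (The real code also sign-extends the
     intermediate value because GEN_INT wants a sign-extended
     HOST_WIDE_INT; the shift discards those sign copies.)  */
  uint64_t r = (ud4 << 16) | ud3;
  r <<= 32;
  r |= ud2 << 16;
  r |= ud1;
  return r;				/* equals c for every input */
}
#endif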
10106 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10107 Output insns to set DEST equal to the constant C as a series of
10108 lis, ori and shl instructions. */
10111 rs6000_emit_set_long_const (rtx dest
, HOST_WIDE_INT c
)
10114 HOST_WIDE_INT ud1
, ud2
, ud3
, ud4
;
10124 if ((ud4
== 0xffff && ud3
== 0xffff && ud2
== 0xffff && (ud1
& 0x8000))
10125 || (ud4
== 0 && ud3
== 0 && ud2
== 0 && ! (ud1
& 0x8000)))
10126 emit_move_insn (dest
, GEN_INT ((ud1
^ 0x8000) - 0x8000));
10128 else if ((ud4
== 0xffff && ud3
== 0xffff && (ud2
& 0x8000))
10129 || (ud4
== 0 && ud3
== 0 && ! (ud2
& 0x8000)))
10131 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
10133 emit_move_insn (ud1
!= 0 ? copy_rtx (temp
) : dest
,
10134 GEN_INT (((ud2
<< 16) ^ 0x80000000) - 0x80000000));
10136 emit_move_insn (dest
,
10137 gen_rtx_IOR (DImode
, copy_rtx (temp
),
10140 else if (ud3
== 0 && ud4
== 0)
10142 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
10144 gcc_assert (ud2
& 0x8000);
10145 emit_move_insn (copy_rtx (temp
),
10146 GEN_INT (((ud2
<< 16) ^ 0x80000000) - 0x80000000));
10148 emit_move_insn (copy_rtx (temp
),
10149 gen_rtx_IOR (DImode
, copy_rtx (temp
),
10151 emit_move_insn (dest
,
10152 gen_rtx_ZERO_EXTEND (DImode
,
10153 gen_lowpart (SImode
,
10154 copy_rtx (temp
))));
10156 else if ((ud4
== 0xffff && (ud3
& 0x8000))
10157 || (ud4
== 0 && ! (ud3
& 0x8000)))
10159 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
10161 emit_move_insn (copy_rtx (temp
),
10162 GEN_INT (((ud3
<< 16) ^ 0x80000000) - 0x80000000));
10164 emit_move_insn (copy_rtx (temp
),
10165 gen_rtx_IOR (DImode
, copy_rtx (temp
),
10167 emit_move_insn (ud1
!= 0 ? copy_rtx (temp
) : dest
,
10168 gen_rtx_ASHIFT (DImode
, copy_rtx (temp
),
10171 emit_move_insn (dest
,
10172 gen_rtx_IOR (DImode
, copy_rtx (temp
),
10177 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
10179 emit_move_insn (copy_rtx (temp
),
10180 GEN_INT (((ud4
<< 16) ^ 0x80000000) - 0x80000000));
10182 emit_move_insn (copy_rtx (temp
),
10183 gen_rtx_IOR (DImode
, copy_rtx (temp
),
10186 emit_move_insn (ud2
!= 0 || ud1
!= 0 ? copy_rtx (temp
) : dest
,
10187 gen_rtx_ASHIFT (DImode
, copy_rtx (temp
),
10190 emit_move_insn (ud1
!= 0 ? copy_rtx (temp
) : dest
,
10191 gen_rtx_IOR (DImode
, copy_rtx (temp
),
10192 GEN_INT (ud2
<< 16)));
10194 emit_move_insn (dest
,
10195 gen_rtx_IOR (DImode
, copy_rtx (temp
),
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
static rtvec
rs6000_const_vec (machine_mode mode)
{
  int i, subparts;
  rtvec v;

  /* subparts: the number of elements in MODE.  */

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
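/* Illustrative sketch (not part of the original sources): for a vector with
   SUBPARTS elements the selector built above maps element i to
   i +/- SUBPARTS/2, i.e. it swaps the two halves of the vector.  Standalone
   model with a hypothetical helper name: */
#if 0
static void
demo_halfswap_selector (int subparts, int sel[])
{
  for (int i = 0; i < subparts / 2; ++i)
    sel[i] = i + subparts / 2;
  for (int i = subparts / 2; i < subparts; ++i)
    sel[i] = i - subparts / 2;
}
/* subparts == 4 yields sel = {2, 3, 0, 1}.  */
#endif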
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store operation.  */

static void
rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     element.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
						  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insns that represent an lxvd2x and xxpermdi.  */

static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insns that represent an xxpermdi and an stxvd2x.  */

static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */

static void
rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
	      && VECTOR_MEM_VSX_P (mode)
	      && !TARGET_P9_VECTOR
	      && !gpr_or_gpr_p (dest, source)
	      && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
	source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
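/* Illustrative sketch (not part of the original sources): the load and store
   helpers above each emit the half-swap permute twice, and two half-swaps
   compose to the identity, which is why the element order comes out right on
   little-endian even though lxvd2x/stxvd2x swap doublewords.  Standalone
   model on a two-doubleword value: */
#if 0
#include <string.h>

static void
demo_halfswap (long long v[2])
{
  long long t = v[0];
  v[0] = v[1];
  v[1] = t;
}

static void
demo_le_load (long long dst[2], const long long mem[2])
{
  memcpy (dst, mem, 2 * sizeof (long long));
  demo_halfswap (dst);		/* what the lxvd2x-style permute does       */
  demo_halfswap (dst);		/* what the xxpermdi does; order restored   */
}
#endif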
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

	(SUBREG:SF (REG:SI ...))
	(SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

bool
valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

	(SUBREG:SF (REG:SI)) and
	(SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to
   convert the bits before we can use a direct move or operate on the bits
   in the vector register as an integer type.

   Skip things like (set (SUBREG:SI (...) (SUBREG:SI (...)).  */

static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
	{
	  emit_insn (gen_movsi_from_sf (dest, inner_source));
	  return true;
	}

      if (mode == SFmode && inner_mode == SImode)
	{
	  emit_insn (gen_movsf_from_si (dest, inner_source));
	  return true;
	}
    }

  return false;
}
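/* Illustrative sketch (not part of the original sources): the subreg moves
   handled above are bit-for-bit reinterpretations between a 32-bit float and
   a 32-bit integer; no numeric conversion happens.  The port needs the
   special insns only because SFmode values live in DFmode format inside VSX
   registers.  In plain C the reinterpretation itself looks like this: */
#if 0
#include <stdint.h>
#include <string.h>

static uint32_t
demo_si_from_sf (float f)
{
  uint32_t bits;
  memcpy (&bits, &f, sizeof bits);	/* copy the bits, do not convert */
  return bits;
}
#endif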
10435 /* Emit a move from SOURCE to DEST in mode MODE. */
10437 rs6000_emit_move (rtx dest
, rtx source
, machine_mode mode
)
10440 operands
[0] = dest
;
10441 operands
[1] = source
;
10443 if (TARGET_DEBUG_ADDR
)
10446 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10447 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10448 GET_MODE_NAME (mode
),
10451 can_create_pseudo_p ());
10453 fprintf (stderr
, "source:\n");
10454 debug_rtx (source
);
10457 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10458 if (CONST_WIDE_INT_P (operands
[1])
10459 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
10461 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10462 gcc_unreachable ();
10465 /* See if we need to special case SImode/SFmode SUBREG moves. */
10466 if ((mode
== SImode
|| mode
== SFmode
) && SUBREG_P (source
)
10467 && rs6000_emit_move_si_sf_subreg (dest
, source
, mode
))
10470 /* Check if GCC is setting up a block move that will end up using FP
10471 registers as temporaries. We must make sure this is acceptable. */
10472 if (GET_CODE (operands
[0]) == MEM
10473 && GET_CODE (operands
[1]) == MEM
10475 && (rs6000_slow_unaligned_access (DImode
, MEM_ALIGN (operands
[0]))
10476 || rs6000_slow_unaligned_access (DImode
, MEM_ALIGN (operands
[1])))
10477 && ! (rs6000_slow_unaligned_access (SImode
,
10478 (MEM_ALIGN (operands
[0]) > 32
10479 ? 32 : MEM_ALIGN (operands
[0])))
10480 || rs6000_slow_unaligned_access (SImode
,
10481 (MEM_ALIGN (operands
[1]) > 32
10482 ? 32 : MEM_ALIGN (operands
[1]))))
10483 && ! MEM_VOLATILE_P (operands
[0])
10484 && ! MEM_VOLATILE_P (operands
[1]))
10486 emit_move_insn (adjust_address (operands
[0], SImode
, 0),
10487 adjust_address (operands
[1], SImode
, 0));
10488 emit_move_insn (adjust_address (copy_rtx (operands
[0]), SImode
, 4),
10489 adjust_address (copy_rtx (operands
[1]), SImode
, 4));
10493 if (can_create_pseudo_p () && GET_CODE (operands
[0]) == MEM
10494 && !gpc_reg_operand (operands
[1], mode
))
10495 operands
[1] = force_reg (mode
, operands
[1]);
10497 /* Recognize the case where operand[1] is a reference to thread-local
10498 data and load its address to a register. */
10499 if (tls_referenced_p (operands
[1]))
10501 enum tls_model model
;
10502 rtx tmp
= operands
[1];
10505 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
10507 addend
= XEXP (XEXP (tmp
, 0), 1);
10508 tmp
= XEXP (XEXP (tmp
, 0), 0);
10511 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
10512 model
= SYMBOL_REF_TLS_MODEL (tmp
);
10513 gcc_assert (model
!= 0);
10515 tmp
= rs6000_legitimize_tls_address (tmp
, model
);
10518 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
10519 tmp
= force_operand (tmp
, operands
[0]);
10524 /* 128-bit constant floating-point values on Darwin should really be loaded
10525 as two parts. However, this premature splitting is a problem when DFmode
10526 values can go into Altivec registers. */
10527 if (FLOAT128_IBM_P (mode
) && !reg_addr
[DFmode
].scalar_in_vmx_p
10528 && GET_CODE (operands
[1]) == CONST_DOUBLE
)
10530 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
, 0),
10531 simplify_gen_subreg (DFmode
, operands
[1], mode
, 0),
10533 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
,
10534 GET_MODE_SIZE (DFmode
)),
10535 simplify_gen_subreg (DFmode
, operands
[1], mode
,
10536 GET_MODE_SIZE (DFmode
)),
10541 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10542 p1:SD) if p1 is not of floating point class and p0 is spilled as
10543 we can have no analogous movsd_store for this. */
10544 if (lra_in_progress
&& mode
== DDmode
10545 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
10546 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
10547 && GET_CODE (operands
[1]) == SUBREG
&& REG_P (SUBREG_REG (operands
[1]))
10548 && GET_MODE (SUBREG_REG (operands
[1])) == SDmode
)
10551 int regno
= REGNO (SUBREG_REG (operands
[1]));
10553 if (regno
>= FIRST_PSEUDO_REGISTER
)
10555 cl
= reg_preferred_class (regno
);
10556 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][1];
10558 if (regno
>= 0 && ! FP_REGNO_P (regno
))
10561 operands
[0] = gen_lowpart_SUBREG (SDmode
, operands
[0]);
10562 operands
[1] = SUBREG_REG (operands
[1]);
10565 if (lra_in_progress
10567 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
10568 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
10569 && (REG_P (operands
[1])
10570 || (GET_CODE (operands
[1]) == SUBREG
10571 && REG_P (SUBREG_REG (operands
[1])))))
10573 int regno
= REGNO (GET_CODE (operands
[1]) == SUBREG
10574 ? SUBREG_REG (operands
[1]) : operands
[1]);
10577 if (regno
>= FIRST_PSEUDO_REGISTER
)
10579 cl
= reg_preferred_class (regno
);
10580 gcc_assert (cl
!= NO_REGS
);
10581 regno
= ira_class_hard_regs
[cl
][0];
10583 if (FP_REGNO_P (regno
))
10585 if (GET_MODE (operands
[0]) != DDmode
)
10586 operands
[0] = gen_rtx_SUBREG (DDmode
, operands
[0], 0);
10587 emit_insn (gen_movsd_store (operands
[0], operands
[1]));
10589 else if (INT_REGNO_P (regno
))
10590 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
10595 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10596 p:DD)) if p0 is not of floating point class and p1 is spilled as
10597 we can have no analogous movsd_load for this. */
10598 if (lra_in_progress
&& mode
== DDmode
10599 && GET_CODE (operands
[0]) == SUBREG
&& REG_P (SUBREG_REG (operands
[0]))
10600 && GET_MODE (SUBREG_REG (operands
[0])) == SDmode
10601 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
10602 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
10605 int regno
= REGNO (SUBREG_REG (operands
[0]));
10607 if (regno
>= FIRST_PSEUDO_REGISTER
)
10609 cl
= reg_preferred_class (regno
);
10610 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][0];
10612 if (regno
>= 0 && ! FP_REGNO_P (regno
))
10615 operands
[0] = SUBREG_REG (operands
[0]);
10616 operands
[1] = gen_lowpart_SUBREG (SDmode
, operands
[1]);
10619 if (lra_in_progress
10621 && (REG_P (operands
[0])
10622 || (GET_CODE (operands
[0]) == SUBREG
10623 && REG_P (SUBREG_REG (operands
[0]))))
10624 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
10625 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
10627 int regno
= REGNO (GET_CODE (operands
[0]) == SUBREG
10628 ? SUBREG_REG (operands
[0]) : operands
[0]);
10631 if (regno
>= FIRST_PSEUDO_REGISTER
)
10633 cl
= reg_preferred_class (regno
);
10634 gcc_assert (cl
!= NO_REGS
);
10635 regno
= ira_class_hard_regs
[cl
][0];
10637 if (FP_REGNO_P (regno
))
10639 if (GET_MODE (operands
[1]) != DDmode
)
10640 operands
[1] = gen_rtx_SUBREG (DDmode
, operands
[1], 0);
10641 emit_insn (gen_movsd_load (operands
[0], operands
[1]));
10643 else if (INT_REGNO_P (regno
))
10644 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
10650 /* FIXME: In the long term, this switch statement should go away
10651 and be replaced by a sequence of tests based on things like
10657 if (CONSTANT_P (operands
[1])
10658 && GET_CODE (operands
[1]) != CONST_INT
)
10659 operands
[1] = force_const_mem (mode
, operands
[1]);
10666 if (FLOAT128_2REG_P (mode
))
10667 rs6000_eliminate_indexed_memrefs (operands
);
10674 if (CONSTANT_P (operands
[1])
10675 && ! easy_fp_constant (operands
[1], mode
))
10676 operands
[1] = force_const_mem (mode
, operands
[1]);
10688 if (CONSTANT_P (operands
[1])
10689 && !easy_vector_constant (operands
[1], mode
))
10690 operands
[1] = force_const_mem (mode
, operands
[1]);
10695 /* Use default pattern for address of ELF small data */
10698 && DEFAULT_ABI
== ABI_V4
10699 && (GET_CODE (operands
[1]) == SYMBOL_REF
10700 || GET_CODE (operands
[1]) == CONST
)
10701 && small_data_operand (operands
[1], mode
))
10703 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10707 if (DEFAULT_ABI
== ABI_V4
10708 && mode
== Pmode
&& mode
== SImode
10709 && flag_pic
== 1 && got_operand (operands
[1], mode
))
10711 emit_insn (gen_movsi_got (operands
[0], operands
[1]));
10715 if ((TARGET_ELF
|| DEFAULT_ABI
== ABI_DARWIN
)
10719 && CONSTANT_P (operands
[1])
10720 && GET_CODE (operands
[1]) != HIGH
10721 && GET_CODE (operands
[1]) != CONST_INT
)
10723 rtx target
= (!can_create_pseudo_p ()
10725 : gen_reg_rtx (mode
));
10727 /* If this is a function address on -mcall-aixdesc,
10728 convert it to the address of the descriptor. */
10729 if (DEFAULT_ABI
== ABI_AIX
10730 && GET_CODE (operands
[1]) == SYMBOL_REF
10731 && XSTR (operands
[1], 0)[0] == '.')
10733 const char *name
= XSTR (operands
[1], 0);
10735 while (*name
== '.')
10737 new_ref
= gen_rtx_SYMBOL_REF (Pmode
, name
);
10738 CONSTANT_POOL_ADDRESS_P (new_ref
)
10739 = CONSTANT_POOL_ADDRESS_P (operands
[1]);
10740 SYMBOL_REF_FLAGS (new_ref
) = SYMBOL_REF_FLAGS (operands
[1]);
10741 SYMBOL_REF_USED (new_ref
) = SYMBOL_REF_USED (operands
[1]);
10742 SYMBOL_REF_DATA (new_ref
) = SYMBOL_REF_DATA (operands
[1]);
10743 operands
[1] = new_ref
;
10746 if (DEFAULT_ABI
== ABI_DARWIN
)
10749 if (MACHO_DYNAMIC_NO_PIC_P
)
10751 /* Take care of any required data indirection. */
10752 operands
[1] = rs6000_machopic_legitimize_pic_address (
10753 operands
[1], mode
, operands
[0]);
10754 if (operands
[0] != operands
[1])
10755 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10759 emit_insn (gen_macho_high (target
, operands
[1]));
10760 emit_insn (gen_macho_low (operands
[0], target
, operands
[1]));
10764 emit_insn (gen_elf_high (target
, operands
[1]));
10765 emit_insn (gen_elf_low (operands
[0], target
, operands
[1]));
10769 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10770 and we have put it in the TOC, we just need to make a TOC-relative
10771 reference to it. */
10773 && GET_CODE (operands
[1]) == SYMBOL_REF
10774 && use_toc_relative_ref (operands
[1], mode
))
10775 operands
[1] = create_TOC_reference (operands
[1], operands
[0]);
10776 else if (mode
== Pmode
10777 && CONSTANT_P (operands
[1])
10778 && GET_CODE (operands
[1]) != HIGH
10779 && ((GET_CODE (operands
[1]) != CONST_INT
10780 && ! easy_fp_constant (operands
[1], mode
))
10781 || (GET_CODE (operands
[1]) == CONST_INT
10782 && (num_insns_constant (operands
[1], mode
)
10783 > (TARGET_CMODEL
!= CMODEL_SMALL
? 3 : 2)))
10784 || (GET_CODE (operands
[0]) == REG
10785 && FP_REGNO_P (REGNO (operands
[0]))))
10786 && !toc_relative_expr_p (operands
[1], false, NULL
, NULL
)
10787 && (TARGET_CMODEL
== CMODEL_SMALL
10788 || can_create_pseudo_p ()
10789 || (REG_P (operands
[0])
10790 && INT_REG_OK_FOR_BASE_P (operands
[0], true))))
10794 /* Darwin uses a special PIC legitimizer. */
10795 if (DEFAULT_ABI
== ABI_DARWIN
&& MACHOPIC_INDIRECT
)
10798 rs6000_machopic_legitimize_pic_address (operands
[1], mode
,
10800 if (operands
[0] != operands
[1])
10801 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10806 /* If we are to limit the number of things we put in the TOC and
10807 this is a symbol plus a constant we can add in one insn,
10808 just put the symbol in the TOC and add the constant. */
10809 if (GET_CODE (operands
[1]) == CONST
10810 && TARGET_NO_SUM_IN_TOC
10811 && GET_CODE (XEXP (operands
[1], 0)) == PLUS
10812 && add_operand (XEXP (XEXP (operands
[1], 0), 1), mode
)
10813 && (GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == LABEL_REF
10814 || GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == SYMBOL_REF
)
10815 && ! side_effects_p (operands
[0]))
10818 force_const_mem (mode
, XEXP (XEXP (operands
[1], 0), 0));
10819 rtx other
= XEXP (XEXP (operands
[1], 0), 1);
10821 sym
= force_reg (mode
, sym
);
10822 emit_insn (gen_add3_insn (operands
[0], sym
, other
));
10826 operands
[1] = force_const_mem (mode
, operands
[1]);
10829 && GET_CODE (XEXP (operands
[1], 0)) == SYMBOL_REF
10830 && use_toc_relative_ref (XEXP (operands
[1], 0), mode
))
10832 rtx tocref
= create_TOC_reference (XEXP (operands
[1], 0),
10834 operands
[1] = gen_const_mem (mode
, tocref
);
10835 set_mem_alias_set (operands
[1], get_TOC_alias_set ());
10841 if (!VECTOR_MEM_VSX_P (TImode
))
10842 rs6000_eliminate_indexed_memrefs (operands
);
10846 rs6000_eliminate_indexed_memrefs (operands
);
10850 fatal_insn ("bad move", gen_rtx_SET (dest
, source
));
10853 /* Above, we may have called force_const_mem which may have returned
10854 an invalid address. If we can, fix this up; otherwise, reload will
10855 have to deal with it. */
10856 if (GET_CODE (operands
[1]) == MEM
)
10857 operands
[1] = validize_mem (operands
[1]);
10859 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE)	\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
10875 /* Walk down the type tree of TYPE counting consecutive base elements.
10876 If *MODEP is VOIDmode, then set it to the first valid floating point
10877 or vector type. If a non-floating point or vector type is found, or
10878 if a floating point or vector type that doesn't match a non-VOIDmode
10879 *MODEP is found, then return -1, otherwise return the count in the
10883 rs6000_aggregate_candidate (const_tree type
, machine_mode
*modep
)
10886 HOST_WIDE_INT size
;
10888 switch (TREE_CODE (type
))
10891 mode
= TYPE_MODE (type
);
10892 if (!SCALAR_FLOAT_MODE_P (mode
))
10895 if (*modep
== VOIDmode
)
10898 if (*modep
== mode
)
10904 mode
= TYPE_MODE (TREE_TYPE (type
));
10905 if (!SCALAR_FLOAT_MODE_P (mode
))
10908 if (*modep
== VOIDmode
)
10911 if (*modep
== mode
)
10917 if (!TARGET_ALTIVEC_ABI
|| !TARGET_ALTIVEC
)
10920 /* Use V4SImode as representative of all 128-bit vector types. */
10921 size
= int_size_in_bytes (type
);
10931 if (*modep
== VOIDmode
)
10934 /* Vector modes are considered to be opaque: two vectors are
10935 equivalent for the purposes of being homogeneous aggregates
10936 if they are the same size. */
10937 if (*modep
== mode
)
10945 tree index
= TYPE_DOMAIN (type
);
10947 /* Can't handle incomplete types nor sizes that are not
10949 if (!COMPLETE_TYPE_P (type
)
10950 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10953 count
= rs6000_aggregate_candidate (TREE_TYPE (type
), modep
);
10956 || !TYPE_MAX_VALUE (index
)
10957 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
10958 || !TYPE_MIN_VALUE (index
)
10959 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
10963 count
*= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
10964 - tree_to_uhwi (TYPE_MIN_VALUE (index
)));
10966 /* There must be no padding. */
10967 if (wi::to_wide (TYPE_SIZE (type
))
10968 != count
* GET_MODE_BITSIZE (*modep
))
10980 /* Can't handle incomplete types nor sizes that are not
10982 if (!COMPLETE_TYPE_P (type
)
10983 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10986 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
10988 if (TREE_CODE (field
) != FIELD_DECL
)
10991 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
10994 count
+= sub_count
;
10997 /* There must be no padding. */
10998 if (wi::to_wide (TYPE_SIZE (type
))
10999 != count
* GET_MODE_BITSIZE (*modep
))
11006 case QUAL_UNION_TYPE
:
11008 /* These aren't very interesting except in a degenerate case. */
11013 /* Can't handle incomplete types nor sizes that are not
11015 if (!COMPLETE_TYPE_P (type
)
11016 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
11019 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
11021 if (TREE_CODE (field
) != FIELD_DECL
)
11024 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
11027 count
= count
> sub_count
? count
: sub_count
;
11030 /* There must be no padding. */
11031 if (wi::to_wide (TYPE_SIZE (type
))
11032 != count
* GET_MODE_BITSIZE (*modep
))
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
				       machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      *elt_mode = field_mode;
	      *n_elts = field_count;
	      return true;
	    }
	}
    }

  *elt_mode = mode;
  *n_elts = 1;
  return false;
}
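/* Illustrative sketch (not part of the original sources): the check above
   charges each scalar float element (size + 7) / 8 registers and each vector
   element one register, and accepts the aggregate only if the whole thing
   fits in AGGR_ARG_NUM_REG registers (8 under the ELFv2 ABI).  Standalone
   model with hypothetical inputs: */
#if 0
#include <stdbool.h>

static bool
demo_homogeneous_fits (int field_count, int elt_size_bytes, bool elt_is_float)
{
  int n_regs = elt_is_float ? (elt_size_bytes + 7) / 8 : 1;
  return field_count > 0 && field_count * n_regs <= 8;
}

/* e.g. four doubles:  demo_homogeneous_fits (4, 8, true)  -> true;
        twelve floats: demo_homogeneous_fits (12, 4, true) -> false.  */
#endif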
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
11121 rs6000_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
11123 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11125 && rs6000_darwin64_abi
11126 && TREE_CODE (type
) == RECORD_TYPE
11127 && int_size_in_bytes (type
) > 0)
11129 CUMULATIVE_ARGS valcum
;
11133 valcum
.fregno
= FP_ARG_MIN_REG
;
11134 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
11135 /* Do a trial code generation as if this were going to be passed
11136 as an argument; if any part goes in memory, we return NULL. */
11137 valret
= rs6000_darwin64_record_arg (&valcum
, type
, true, true);
11140 /* Otherwise fall through to more conventional ABI rules. */
11143 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
11144 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type
), type
,
11148 /* The ELFv2 ABI returns aggregates up to 16B in registers */
11149 if (DEFAULT_ABI
== ABI_ELFv2
&& AGGREGATE_TYPE_P (type
)
11150 && (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) <= 16)
11153 if (AGGREGATE_TYPE_P (type
)
11154 && (aix_struct_return
11155 || (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) > 8))
11158 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11159 modes only exist for GCC vector types if -maltivec. */
11160 if (TARGET_32BIT
&& !TARGET_ALTIVEC_ABI
11161 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type
)))
11164 /* Return synthetic vectors in memory. */
11165 if (TREE_CODE (type
) == VECTOR_TYPE
11166 && int_size_in_bytes (type
) > (TARGET_ALTIVEC_ABI
? 16 : 8))
11168 static bool warned_for_return_big_vectors
= false;
11169 if (!warned_for_return_big_vectors
)
11171 warning (OPT_Wpsabi
, "GCC vector returned by reference: "
11172 "non-standard ABI extension with no compatibility "
11174 warned_for_return_big_vectors
= true;
11179 if (DEFAULT_ABI
== ABI_V4
&& TARGET_IEEEQUAD
11180 && FLOAT128_IEEE_P (TYPE_MODE (type
)))
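/* Illustrative sketch (not part of the original sources): several checks
   above compare "(unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8" (or
   16).  Because int_size_in_bytes returns -1 for variable-sized types, the
   unsigned cast turns that -1 into a huge value, so variable-sized
   aggregates always take the "return in memory" path.  Standalone model: */
#if 0
#include <stdbool.h>

static bool
demo_returns_in_memory (long size_in_bytes, unsigned long limit)
{
  /* size_in_bytes == -1 means "variable size"; the cast makes it > limit.  */
  return (unsigned long) size_in_bytes > limit;
}

/* demo_returns_in_memory (-1, 8) and demo_returns_in_memory (24, 8) are true;
   demo_returns_in_memory (8, 8) is false.  */
#endif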
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
	      == PAD_UPWARD));
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }

  return false;
}
#endif
11228 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11229 for a call to a function whose data type is FNTYPE.
11230 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11232 For incoming args we set the number of arguments in the prototype large
11233 so we never return a PARALLEL. */
11236 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
11237 rtx libname ATTRIBUTE_UNUSED
, int incoming
,
11238 int libcall
, int n_named_args
,
11239 tree fndecl ATTRIBUTE_UNUSED
,
11240 machine_mode return_mode ATTRIBUTE_UNUSED
)
11242 static CUMULATIVE_ARGS zero_cumulative
;
11244 *cum
= zero_cumulative
;
11246 cum
->fregno
= FP_ARG_MIN_REG
;
11247 cum
->vregno
= ALTIVEC_ARG_MIN_REG
;
11248 cum
->prototype
= (fntype
&& prototype_p (fntype
));
11249 cum
->call_cookie
= ((DEFAULT_ABI
== ABI_V4
&& libcall
)
11250 ? CALL_LIBCALL
: CALL_NORMAL
);
11251 cum
->sysv_gregno
= GP_ARG_MIN_REG
;
11252 cum
->stdarg
= stdarg_p (fntype
);
11253 cum
->libcall
= libcall
;
11255 cum
->nargs_prototype
= 0;
11256 if (incoming
|| cum
->prototype
)
11257 cum
->nargs_prototype
= n_named_args
;
11259 /* Check for a longcall attribute. */
11260 if ((!fntype
&& rs6000_default_long_calls
)
11262 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype
))
11263 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype
))))
11264 cum
->call_cookie
|= CALL_LONG
;
11266 if (TARGET_DEBUG_ARG
)
11268 fprintf (stderr
, "\ninit_cumulative_args:");
11271 tree ret_type
= TREE_TYPE (fntype
);
11272 fprintf (stderr
, " ret code = %s,",
11273 get_tree_code_name (TREE_CODE (ret_type
)));
11276 if (cum
->call_cookie
& CALL_LONG
)
11277 fprintf (stderr
, " longcall,");
11279 fprintf (stderr
, " proto = %d, nargs = %d\n",
11280 cum
->prototype
, cum
->nargs_prototype
);
11283 #ifdef HAVE_AS_GNU_ATTRIBUTE
11284 if (TARGET_ELF
&& (TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
))
11286 cum
->escapes
= call_ABI_of_interest (fndecl
);
11293 return_type
= TREE_TYPE (fntype
);
11294 return_mode
= TYPE_MODE (return_type
);
11297 return_type
= lang_hooks
.types
.type_for_mode (return_mode
, 0);
11299 if (return_type
!= NULL
)
11301 if (TREE_CODE (return_type
) == RECORD_TYPE
11302 && TYPE_TRANSPARENT_AGGR (return_type
))
11304 return_type
= TREE_TYPE (first_field (return_type
));
11305 return_mode
= TYPE_MODE (return_type
);
11307 if (AGGREGATE_TYPE_P (return_type
)
11308 && ((unsigned HOST_WIDE_INT
) int_size_in_bytes (return_type
)
11310 rs6000_returns_struct
= true;
11312 if (SCALAR_FLOAT_MODE_P (return_mode
))
11314 rs6000_passes_float
= true;
11315 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
)
11316 && (FLOAT128_IBM_P (return_mode
)
11317 || FLOAT128_IEEE_P (return_mode
)
11318 || (return_type
!= NULL
11319 && (TYPE_MAIN_VARIANT (return_type
)
11320 == long_double_type_node
))))
11321 rs6000_passes_long_double
= true;
11323 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode
)
11324 || PAIRED_VECTOR_MODE (return_mode
))
11325 rs6000_passes_vector
= true;
11332 && TARGET_ALTIVEC_ABI
11333 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype
))))
11335 error ("cannot return value in vector register because"
11336 " altivec instructions are disabled, use %qs"
11337 " to enable them", "-maltivec");
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}

/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}

/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			      machine_mode mode,
			      int *punsignedp ATTRIBUTE_UNUSED,
			      const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}

static bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
}

/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (TARGET_SINGLE_FLOAT && mode == SFmode)
    return true;
  if (TARGET_DOUBLE_FLOAT && mode == DFmode)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}
11414 /* Implement TARGET_FUNCTION_ARG_PADDING.
11416 For the AIX ABI structs are always stored left shifted in their
11419 static pad_direction
11420 rs6000_function_arg_padding (machine_mode mode
, const_tree type
)
11422 #ifndef AGGREGATE_PADDING_FIXED
11423 #define AGGREGATE_PADDING_FIXED 0
11425 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11426 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11429 if (!AGGREGATE_PADDING_FIXED
)
11431 /* GCC used to pass structures of the same size as integer types as
11432 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11433 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11434 passed padded downward, except that -mstrict-align further
11435 muddied the water in that multi-component structures of 2 and 4
11436 bytes in size were passed padded upward.
11438 The following arranges for best compatibility with previous
11439 versions of gcc, but removes the -mstrict-align dependency. */
11440 if (BYTES_BIG_ENDIAN
)
11442 HOST_WIDE_INT size
= 0;
11444 if (mode
== BLKmode
)
11446 if (type
&& TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
)
11447 size
= int_size_in_bytes (type
);
11450 size
= GET_MODE_SIZE (mode
);
11452 if (size
== 1 || size
== 2 || size
== 4)
11453 return PAD_DOWNWARD
;
11458 if (AGGREGATES_PAD_UPWARD_ALWAYS
)
11460 if (type
!= 0 && AGGREGATE_TYPE_P (type
))
11464 /* Fall back to the default. */
11465 return default_function_arg_padding (mode
, type
);
11468 /* If defined, a C expression that gives the alignment boundary, in bits,
11469 of an argument with the specified mode and type. If it is not defined,
11470 PARM_BOUNDARY is used for all arguments.
11472 V.4 wants long longs and doubles to be double word aligned. Just
11473 testing the mode size is a boneheaded way to do this as it means
11474 that other types such as complex int are also double word aligned.
11475 However, we're stuck with this because changing the ABI might break
11476 existing library interfaces.
11478 Quadword align Altivec/VSX vectors.
11479 Quadword align large synthetic vector types. */
11481 static unsigned int
11482 rs6000_function_arg_boundary (machine_mode mode
, const_tree type
)
11484 machine_mode elt_mode
;
11487 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
11489 if (DEFAULT_ABI
== ABI_V4
11490 && (GET_MODE_SIZE (mode
) == 8
11491 || (TARGET_HARD_FLOAT
11492 && !is_complex_IBM_long_double (mode
)
11493 && FLOAT128_2REG_P (mode
))))
11495 else if (FLOAT128_VECTOR_P (mode
))
11497 else if (PAIRED_VECTOR_MODE (mode
)
11498 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11499 && int_size_in_bytes (type
) >= 8
11500 && int_size_in_bytes (type
) < 16))
11502 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
11503 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11504 && int_size_in_bytes (type
) >= 16))
11507 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11508 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11509 -mcompat-align-parm is used. */
11510 if (((DEFAULT_ABI
== ABI_AIX
&& !rs6000_compat_align_parm
)
11511 || DEFAULT_ABI
== ABI_ELFv2
)
11512 && type
&& TYPE_ALIGN (type
) > 64)
11514 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11515 or homogeneous float/vector aggregates here. We already handled
11516 vector aggregates above, but still need to check for float here. */
11517 bool aggregate_p
= (AGGREGATE_TYPE_P (type
)
11518 && !SCALAR_FLOAT_MODE_P (elt_mode
));
11520 /* We used to check for BLKmode instead of the above aggregate type
11521 check. Warn when this results in any difference to the ABI. */
11522 if (aggregate_p
!= (mode
== BLKmode
))
11524 static bool warned
;
11525 if (!warned
&& warn_psabi
)
11528 inform (input_location
,
11529 "the ABI of passing aggregates with %d-byte alignment"
11530 " has changed in GCC 5",
11531 (int) TYPE_ALIGN (type
) / BITS_PER_UNIT
);
11539 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11540 implement the "aggregate type" check as a BLKmode check here; this
11541 means certain aggregate types are in fact not aligned. */
11542 if (TARGET_MACHO
&& rs6000_darwin64_abi
11544 && type
&& TYPE_ALIGN (type
) > 64)
11547 return PARM_BOUNDARY
;
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}

/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}

/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
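/* Illustrative sketch (not part of the original sources): rs6000_parm_start
   rounds the running word count up so the argument begins on its required
   boundary, using the identity n + (-(offset + n) & (align - 1)), and
   rs6000_arg_size rounds a byte size up to whole words.  Standalone model of
   both computations, with hypothetical names: */
#if 0
static unsigned int
demo_parm_start (unsigned int parm_offset_words, unsigned int nwords,
		 unsigned int align_words)	/* power of two */
{
  unsigned int mask = align_words - 1;
  return nwords + (-(parm_offset_words + nwords) & mask);
}

static unsigned long
demo_arg_size_words (unsigned long size_bytes, unsigned int word_bytes)
{
  return (size_bytes + word_bytes - 1) / word_bytes;
}

/* e.g. with a 4-word parameter-area offset and 3 words already used, a
   doubleword-aligned argument starts at demo_parm_start (4, 3, 2) == 4, so
   its absolute slot (4 + 4 == 8) is even.  */
#endif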
11591 /* Use this to flush pending int fields. */
11594 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS
*cum
,
11595 HOST_WIDE_INT bitpos
, int final
)
11597 unsigned int startbit
, endbit
;
11598 int intregs
, intoffset
;
11600 /* Handle the situations where a float is taking up the first half
11601 of the GPR, and the other half is empty (typically due to
11602 alignment restrictions). We can detect this by a 8-byte-aligned
11603 int field, or by seeing that this is the final flush for this
11604 argument. Count the word and continue on. */
11605 if (cum
->floats_in_gpr
== 1
11606 && (cum
->intoffset
% 64 == 0
11607 || (cum
->intoffset
== -1 && final
)))
11610 cum
->floats_in_gpr
= 0;
11613 if (cum
->intoffset
== -1)
11616 intoffset
= cum
->intoffset
;
11617 cum
->intoffset
= -1;
11618 cum
->floats_in_gpr
= 0;
11620 if (intoffset
% BITS_PER_WORD
!= 0)
11622 unsigned int bits
= BITS_PER_WORD
- intoffset
% BITS_PER_WORD
;
11623 if (!int_mode_for_size (bits
, 0).exists ())
11625 /* We couldn't find an appropriate mode, which happens,
11626 e.g., in packed structs when there are 3 bytes to load.
11627 Back intoffset back to the beginning of the word in this
11629 intoffset
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11633 startbit
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11634 endbit
= ROUND_UP (bitpos
, BITS_PER_WORD
);
11635 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
11636 cum
->words
+= intregs
;
11637 /* words should be unsigned. */
11638 if ((unsigned)cum
->words
< (endbit
/BITS_PER_WORD
))
11640 int pad
= (endbit
/BITS_PER_WORD
) - cum
->words
;
11645 /* The darwin64 ABI calls for us to recurse down through structs,
11646 looking for elements passed in registers. Unfortunately, we have
11647 to track int register count here also because of misalignments
11648 in powerpc alignment mode. */
11651 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS
*cum
,
11653 HOST_WIDE_INT startbitpos
)
11657 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
11658 if (TREE_CODE (f
) == FIELD_DECL
)
11660 HOST_WIDE_INT bitpos
= startbitpos
;
11661 tree ftype
= TREE_TYPE (f
);
11663 if (ftype
== error_mark_node
)
11665 mode
= TYPE_MODE (ftype
);
11667 if (DECL_SIZE (f
) != 0
11668 && tree_fits_uhwi_p (bit_position (f
)))
11669 bitpos
+= int_bit_position (f
);
11671 /* ??? FIXME: else assume zero offset. */
11673 if (TREE_CODE (ftype
) == RECORD_TYPE
)
11674 rs6000_darwin64_record_arg_advance_recurse (cum
, ftype
, bitpos
);
11675 else if (USE_FP_FOR_ARG_P (cum
, mode
))
11677 unsigned n_fpregs
= (GET_MODE_SIZE (mode
) + 7) >> 3;
11678 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
11679 cum
->fregno
+= n_fpregs
;
11680 /* Single-precision floats present a special problem for
11681 us, because they are smaller than an 8-byte GPR, and so
11682 the structure-packing rules combined with the standard
11683 varargs behavior mean that we want to pack float/float
11684 and float/int combinations into a single register's
11685 space. This is complicated by the arg advance flushing,
11686 which works on arbitrarily large groups of int-type
11688 if (mode
== SFmode
)
11690 if (cum
->floats_in_gpr
== 1)
11692 /* Two floats in a word; count the word and reset
11693 the float count. */
11695 cum
->floats_in_gpr
= 0;
11697 else if (bitpos
% 64 == 0)
11699 /* A float at the beginning of an 8-byte word;
11700 count it and put off adjusting cum->words until
11701 we see if a arg advance flush is going to do it
11703 cum
->floats_in_gpr
++;
11707 /* The float is at the end of a word, preceded
11708 by integer fields, so the arg advance flush
11709 just above has already set cum->words and
11710 everything is taken care of. */
11714 cum
->words
+= n_fpregs
;
11716 else if (USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
11718 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
11722 else if (cum
->intoffset
== -1)
11723 cum
->intoffset
= bitpos
;
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
11741 /* Update the data in CUM to advance over an argument
11742 of mode MODE and data type TYPE.
11743 (TYPE is null for libcalls where that information may not be available.)
11745 Note that for args passed by reference, function_arg will be called
11746 with MODE and TYPE set to that of the pointer to the arg, not the arg
11750 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS
*cum
, machine_mode mode
,
11751 const_tree type
, bool named
, int depth
)
11753 machine_mode elt_mode
;
11756 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
11758 /* Only tick off an argument if we're not recursing. */
11760 cum
->nargs_prototype
--;
11762 #ifdef HAVE_AS_GNU_ATTRIBUTE
11763 if (TARGET_ELF
&& (TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
11766 if (SCALAR_FLOAT_MODE_P (mode
))
11768 rs6000_passes_float
= true;
11769 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
)
11770 && (FLOAT128_IBM_P (mode
)
11771 || FLOAT128_IEEE_P (mode
)
11773 && TYPE_MAIN_VARIANT (type
) == long_double_type_node
)))
11774 rs6000_passes_long_double
= true;
11776 if ((named
&& ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
11777 || (PAIRED_VECTOR_MODE (mode
)
11779 && cum
->sysv_gregno
<= GP_ARG_MAX_REG
))
11780 rs6000_passes_vector
= true;
11784 if (TARGET_ALTIVEC_ABI
11785 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
11786 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11787 && int_size_in_bytes (type
) == 16)))
11789 bool stack
= false;
11791 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
11793 cum
->vregno
+= n_elts
;
11795 if (!TARGET_ALTIVEC
)
11796 error ("cannot pass argument in vector register because"
11797 " altivec instructions are disabled, use %qs"
11798 " to enable them", "-maltivec");
11800 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11801 even if it is going to be passed in a vector register.
11802 Darwin does the same for variable-argument functions. */
11803 if (((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
11805 || (cum
->stdarg
&& DEFAULT_ABI
!= ABI_V4
))
11815 /* Vector parameters must be 16-byte aligned. In 32-bit
11816 mode this means we need to take into account the offset
11817 to the parameter save area. In 64-bit mode, they just
11818 have to start on an even word, since the parameter save
11819 area is 16-byte aligned. */
11821 align
= -(rs6000_parm_offset () + cum
->words
) & 3;
11823 align
= cum
->words
& 1;
11824 cum
->words
+= align
+ rs6000_arg_size (mode
, type
);
11826 if (TARGET_DEBUG_ARG
)
11828 fprintf (stderr
, "function_adv: words = %2d, align=%d, ",
11829 cum
->words
, align
);
11830 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s\n",
11831 cum
->nargs_prototype
, cum
->prototype
,
11832 GET_MODE_NAME (mode
));
11836 else if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
11838 int size
= int_size_in_bytes (type
);
11839 /* Variable sized types have size == -1 and are
11840 treated as if consisting entirely of ints.
11841 Pad to 16 byte boundary if needed. */
11842 if (TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
11843 && (cum
->words
% 2) != 0)
11845 /* For varargs, we can just go up by the size of the struct. */
11847 cum
->words
+= (size
+ 7) / 8;
11850 /* It is tempting to say int register count just goes up by
11851 sizeof(type)/8, but this is wrong in a case such as
11852 { int; double; int; } [powerpc alignment]. We have to
11853 grovel through the fields for these too. */
11854 cum
->intoffset
= 0;
11855 cum
->floats_in_gpr
= 0;
11856 rs6000_darwin64_record_arg_advance_recurse (cum
, type
, 0);
11857 rs6000_darwin64_record_arg_advance_flush (cum
,
11858 size
* BITS_PER_UNIT
, 1);
11860 if (TARGET_DEBUG_ARG
)
11862 fprintf (stderr
, "function_adv: words = %2d, align=%d, size=%d",
11863 cum
->words
, TYPE_ALIGN (type
), size
);
11865 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11866 cum
->nargs_prototype
, cum
->prototype
,
11867 GET_MODE_NAME (mode
));
11870 else if (DEFAULT_ABI
== ABI_V4
)
11872 if (abi_v4_pass_in_fpr (mode
))
11874 /* _Decimal128 must use an even/odd register pair. This assumes
11875 that the register number is odd when fregno is odd. */
11876 if (mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11879 if (cum
->fregno
+ (FLOAT128_2REG_P (mode
) ? 1 : 0)
11880 <= FP_ARG_V4_MAX_REG
)
11881 cum
->fregno
+= (GET_MODE_SIZE (mode
) + 7) >> 3;
11884 cum
->fregno
= FP_ARG_V4_MAX_REG
+ 1;
11885 if (mode
== DFmode
|| FLOAT128_IBM_P (mode
)
11886 || mode
== DDmode
|| mode
== TDmode
)
11887 cum
->words
+= cum
->words
& 1;
11888 cum
->words
+= rs6000_arg_size (mode
, type
);
11893 int n_words
= rs6000_arg_size (mode
, type
);
11894 int gregno
= cum
->sysv_gregno
;
11896 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11897 As does any other 2 word item such as complex int due to a
11898 historical mistake. */
11900 gregno
+= (1 - gregno
) & 1;
11902 /* Multi-reg args are not split between registers and stack. */
11903 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
11905 /* Long long is aligned on the stack. So are other 2 word
11906 items such as complex int due to a historical mistake. */
11908 cum
->words
+= cum
->words
& 1;
11909 cum
->words
+= n_words
;
11912 /* Note: continuing to accumulate gregno past when we've started
11913 spilling to the stack indicates the fact that we've started
11914 spilling to the stack to expand_builtin_saveregs. */
11915 cum
->sysv_gregno
= gregno
+ n_words
;
11918 if (TARGET_DEBUG_ARG
)
11920 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
11921 cum
->words
, cum
->fregno
);
11922 fprintf (stderr
, "gregno = %2d, nargs = %4d, proto = %d, ",
11923 cum
->sysv_gregno
, cum
->nargs_prototype
, cum
->prototype
);
11924 fprintf (stderr
, "mode = %4s, named = %d\n",
11925 GET_MODE_NAME (mode
), named
);
11930 int n_words
= rs6000_arg_size (mode
, type
);
11931 int start_words
= cum
->words
;
11932 int align_words
= rs6000_parm_start (mode
, type
, start_words
);
11934 cum
->words
= align_words
+ n_words
;
11936 if (SCALAR_FLOAT_MODE_P (elt_mode
) && TARGET_HARD_FLOAT
)
11938 /* _Decimal128 must be passed in an even/odd float register pair.
11939 This assumes that the register number is odd when fregno is
11941 if (elt_mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11943 cum
->fregno
+= n_elts
* ((GET_MODE_SIZE (elt_mode
) + 7) >> 3);
11946 if (TARGET_DEBUG_ARG
)
11948 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
11949 cum
->words
, cum
->fregno
);
11950 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s, ",
11951 cum
->nargs_prototype
, cum
->prototype
, GET_MODE_NAME (mode
));
11952 fprintf (stderr
, "named = %d, align = %d, depth = %d\n",
11953 named
, align_words
- start_words
, depth
);
11959 rs6000_function_arg_advance (cumulative_args_t cum
, machine_mode mode
,
11960 const_tree type
, bool named
)
11962 rs6000_function_arg_advance_1 (get_cumulative_args (cum
), mode
, type
, named
,
11966 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11967 structure between cum->intoffset and bitpos to integer registers. */
11970 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS
*cum
,
11971 HOST_WIDE_INT bitpos
, rtx rvec
[], int *k
)
11974 unsigned int regno
;
11975 unsigned int startbit
, endbit
;
11976 int this_regno
, intregs
, intoffset
;
11979 if (cum
->intoffset
== -1)
11982 intoffset
= cum
->intoffset
;
11983 cum
->intoffset
= -1;
11985 /* If this is the trailing part of a word, try to only load that
11986 much into the register. Otherwise load the whole register. Note
11987 that in the latter case we may pick up unwanted bits. It's not a
11988 problem at the moment but may wish to revisit. */
11990 if (intoffset
% BITS_PER_WORD
!= 0)
11992 unsigned int bits
= BITS_PER_WORD
- intoffset
% BITS_PER_WORD
;
11993 if (!int_mode_for_size (bits
, 0).exists (&mode
))
11995 /* We couldn't find an appropriate mode, which happens,
11996 e.g., in packed structs when there are 3 bytes to load.
11997 Back intoffset back to the beginning of the word in this
11999 intoffset
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
12006 startbit
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
12007 endbit
= ROUND_UP (bitpos
, BITS_PER_WORD
);
12008 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
12009 this_regno
= cum
->words
+ intoffset
/ BITS_PER_WORD
;
12011 if (intregs
> 0 && intregs
> GP_ARG_NUM_REG
- this_regno
)
12012 cum
->use_stack
= 1;
12014 intregs
= MIN (intregs
, GP_ARG_NUM_REG
- this_regno
);
12018 intoffset
/= BITS_PER_UNIT
;
12021 regno
= GP_ARG_MIN_REG
+ this_regno
;
12022 reg
= gen_rtx_REG (mode
, regno
);
12024 gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
12027 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
12031 while (intregs
> 0);
12034 /* Recursive workhorse for the following. */
12037 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12038 HOST_WIDE_INT startbitpos, rtx rvec[],
12043 for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
12044 if (TREE_CODE (f) == FIELD_DECL)
12046 HOST_WIDE_INT bitpos = startbitpos;
12047 tree ftype = TREE_TYPE (f);
12049 if (ftype == error_mark_node)
12051 mode = TYPE_MODE (ftype);
12053 if (DECL_SIZE (f) != 0
12054 && tree_fits_uhwi_p (bit_position (f)))
12055 bitpos += int_bit_position (f);
12057 /* ??? FIXME: else assume zero offset. */
12059 if (TREE_CODE (ftype) == RECORD_TYPE)
12060 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12061 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12063 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12067 case E_SCmode: mode = SFmode; break;
12068 case E_DCmode: mode = DFmode; break;
12069 case E_TCmode: mode = TFmode; break;
12073 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12074 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12076 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12077 && (mode == TFmode || mode == TDmode));
12078 /* Long double or _Decimal128 split over regs and memory. */
12079 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12083 = gen_rtx_EXPR_LIST (VOIDmode,
12084 gen_rtx_REG (mode, cum->fregno++),
12085 GEN_INT (bitpos / BITS_PER_UNIT));
12086 if (FLOAT128_2REG_P (mode))
12089 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12091 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12093 = gen_rtx_EXPR_LIST (VOIDmode,
12094 gen_rtx_REG (mode, cum->vregno++),
12095 GEN_INT (bitpos / BITS_PER_UNIT));
12097 else if (cum->intoffset == -1)
12098 cum->intoffset = bitpos;
12102 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12103 the register(s) to be used for each field and subfield of a struct
12104 being passed by value, along with the offset of where the
12105 register's value may be found in the block. FP fields go in FP
12106 register, vector fields go in vector registers, and everything
12107 else goes in int registers, packed as in memory.
12109 This code is also used for function return values. RETVAL indicates
12110 whether this is the case.
12112 Much of this is taken from the SPARC V9 port, which has a similar
12113 calling convention. */
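/* Illustrative sketch (added for exposition, not taken from the original
   source): for a Darwin64 struct such as { double d; int i; } passed by
   value, the PARALLEL built here would look roughly like

       (parallel [(expr_list (reg:DF fN) (const_int 0))
                  (expr_list (reg:DI rM) (const_int 8))])

   i.e. each EXPR_LIST pairs the register carrying one piece of the struct
   with the byte offset of that piece in the struct's memory image.  Which
   fN/rM get used depends on how many registers the preceding arguments
   have already consumed.  */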
12116 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12117 bool named, bool retval)
12119 rtx rvec[FIRST_PSEUDO_REGISTER];
12120 int k = 1, kbase = 1;
12121 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12122 /* This is a copy; modifications are not visible to our caller. */
12123 CUMULATIVE_ARGS copy_cum = *orig_cum;
12124 CUMULATIVE_ARGS *cum = &copy_cum;
12126 /* Pad to 16 byte boundary if needed. */
12127 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12128 && (cum->words % 2) != 0)
12131 cum->intoffset = 0;
12132 cum->use_stack = 0;
12133 cum->named = named;
12135 /* Put entries into rvec[] for individual FP and vector fields, and
12136 for the chunks of memory that go in int regs. Note we start at
12137 element 1; 0 is reserved for an indication of using memory, and
12138 may or may not be filled in below. */
12139 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12140 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12142 /* If any part of the struct went on the stack put all of it there.
12143 This hack is because the generic code for
12144 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12145 parts of the struct are not at the beginning. */
12146 if (cum->use_stack)
12149 return NULL_RTX; /* doesn't go in registers at all */
12151 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12153 if (k > 1 || cum->use_stack)
12154 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12159 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12162 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12167 rtx rvec[GP_ARG_NUM_REG + 1];
12169 if (align_words >= GP_ARG_NUM_REG)
12172 n_units = rs6000_arg_size (mode, type);
12174 /* Optimize the simple case where the arg fits in one gpr, except in
12175 the case of BLKmode due to assign_parms assuming that registers are
12176 BITS_PER_WORD wide. */
12178 || (n_units == 1 && mode != BLKmode))
12179 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12182 if (align_words + n_units > GP_ARG_NUM_REG)
12183 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12184 using a magic NULL_RTX component.
12185 This is not strictly correct. Only some of the arg belongs in
12186 memory, not all of it. However, the normal scheme using
12187 function_arg_partial_nregs can result in unusual subregs, eg.
12188 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12189 store the whole arg to memory is often more efficient than code
12190 to store pieces, and we know that space is available in the right
12191 place for the whole arg. */
12192 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12197 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12198 rtx off = GEN_INT (i++ * 4);
12199 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12201 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12203 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12206 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12207 but must also be copied into the parameter save area starting at
12208 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12209 to the GPRs and/or memory. Return the number of elements used. */
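/* Worked example (added for exposition, not taken from the original source):
   with ALIGN_WORDS == 6 and a 4-word argument on a 64-bit target, only r9
   and r10 are still free, so the elements appended to RVEC would be

       (expr_list (nil)        (const_int 0))   marker: part also goes in memory
       (expr_list (reg:DI r9)  (const_int 0))
       (expr_list (reg:DI r10) (const_int 8))

   and the function would return 3.  */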
12212 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12213 int align_words, rtx *rvec)
12217 if (align_words < GP_ARG_NUM_REG)
12219 int n_words = rs6000_arg_size (mode, type);
12221 if (align_words + n_words > GP_ARG_NUM_REG
12223 || (TARGET_32BIT && TARGET_POWERPC64))
12225 /* If this is partially on the stack, then we only
12226 include the portion actually in registers here. */
12227 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12230 if (align_words + n_words > GP_ARG_NUM_REG)
12232 /* Not all of the arg fits in gprs. Say that it goes in memory
12233 too, using a magic NULL_RTX component. Also see comment in
12234 rs6000_mixed_function_arg for why the normal
12235 function_arg_partial_nregs scheme doesn't work in this case. */
12236 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12241 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12242 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12243 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12245 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12249 /* The whole arg fits in gprs. */
12250 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12251 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12256 /* It's entirely in memory. */
12257 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12263 /* RVEC is a vector of K components of an argument of mode MODE.
12264 Construct the final function_arg return value from it. */
12267 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12269 gcc_assert (k >= 1);
12271 /* Avoid returning a PARALLEL in the trivial cases. */
12274 if (XEXP (rvec[0], 0) == NULL_RTX)
12277 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12278 return XEXP (rvec[0], 0);
12281 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12284 /* Determine where to put an argument to a function.
12285 Value is zero to push the argument on the stack,
12286 or a hard register in which to store the argument.
12288 MODE is the argument's machine mode.
12289 TYPE is the data type of the argument (as a tree).
12290 This is null for libcalls where that information may
12292 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12293 the preceding args and about the function being called. It is
12294 not modified in this routine.
12295 NAMED is nonzero if this argument is a named parameter
12296 (otherwise it is an extra parameter matching an ellipsis).
12298 On RS/6000 the first eight words of non-FP are normally in registers
12299 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12300 Under V.4, the first 8 FP args are in registers.
12302 If this is floating-point and no prototype is specified, we use
12303 both an FP and integer register (or possibly FP reg and stack). Library
12304 functions (when CALL_LIBCALL is set) always have the proper types for args,
12305 so we can pass the FP value just in one register. emit_library_function
12306 doesn't support PARALLEL anyway.
12308 Note that for args passed by reference, function_arg will be called
12309 with MODE and TYPE set to that of the pointer to the arg, not the arg
12313 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12314 const_tree type, bool named)
12316 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12317 enum rs6000_abi abi = DEFAULT_ABI;
12318 machine_mode elt_mode;
12321 /* Return a marker to indicate whether CR1 needs to set or clear the
12322 bit that V.4 uses to say fp args were passed in registers.
12323 Assume that we don't need the marker for software floating point,
12324 or compiler generated library calls. */
12325 if (mode == VOIDmode)
12328 && (cum->call_cookie & CALL_LIBCALL) == 0
12330 || (cum->nargs_prototype < 0
12331 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12332 && TARGET_HARD_FLOAT)
12333 return GEN_INT (cum->call_cookie
12334 | ((cum->fregno == FP_ARG_MIN_REG)
12335 ? CALL_V4_SET_FP_ARGS
12336 : CALL_V4_CLEAR_FP_ARGS));
12338 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12341 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12343 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12345 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12346 if (rslt != NULL_RTX)
12348 /* Else fall through to usual handling. */
12351 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12353 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12357 /* Do we also need to pass this argument in the parameter save area?
12358 Library support functions for IEEE 128-bit are assumed to not need the
12359 value passed both in GPRs and in vector registers. */
12360 if (TARGET_64BIT && !cum->prototype
12361 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12363 int align_words = ROUND_UP (cum->words, 2);
12364 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12367 /* Describe where this argument goes in the vector registers. */
12368 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12370 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12371 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12372 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12375 return rs6000_finish_function_arg (mode, rvec, k);
12377 else if (TARGET_ALTIVEC_ABI
12378 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12379 || (type && TREE_CODE (type) == VECTOR_TYPE
12380 && int_size_in_bytes (type) == 16)))
12382 if (named || abi == ABI_V4)
12386 /* Vector parameters to varargs functions under AIX or Darwin
12387 get passed in memory and possibly also in GPRs. */
12388 int align, align_words, n_words;
12389 machine_mode part_mode;
12391 /* Vector parameters must be 16-byte aligned. In 32-bit
12392 mode this means we need to take into account the offset
12393 to the parameter save area. In 64-bit mode, they just
12394 have to start on an even word, since the parameter save
12395 area is 16-byte aligned. */
12397 align = -(rs6000_parm_offset () + cum->words) & 3;
12399 align = cum->words & 1;
12400 align_words = cum->words + align;
12402 /* Out of registers? Memory, then. */
12403 if (align_words >= GP_ARG_NUM_REG)
12406 if (TARGET_32BIT && TARGET_POWERPC64)
12407 return rs6000_mixed_function_arg (mode, type, align_words);
12409 /* The vector value goes in GPRs. Only the part of the
12410 value in GPRs is reported here. */
12412 n_words = rs6000_arg_size (mode, type);
12413 if (align_words + n_words > GP_ARG_NUM_REG)
12414 /* Fortunately, there are only two possibilities, the value
12415 is either wholly in GPRs or half in GPRs and half not. */
12416 part_mode = DImode;
12418 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12422 else if (abi == ABI_V4)
12424 if (abi_v4_pass_in_fpr (mode))
12426 /* _Decimal128 must use an even/odd register pair. This assumes
12427 that the register number is odd when fregno is odd. */
12428 if (mode == TDmode && (cum->fregno % 2) == 1)
12431 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12432 <= FP_ARG_V4_MAX_REG)
12433 return gen_rtx_REG (mode, cum->fregno);
12439 int n_words = rs6000_arg_size (mode, type);
12440 int gregno = cum->sysv_gregno;
12442 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12443 As does any other 2 word item such as complex int due to a
12444 historical mistake. */
12446 gregno += (1 - gregno) & 1;
12448 /* Multi-reg args are not split between registers and stack. */
12449 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12452 if (TARGET_32BIT && TARGET_POWERPC64)
12453 return rs6000_mixed_function_arg (mode, type,
12454 gregno - GP_ARG_MIN_REG);
12455 return gen_rtx_REG (mode, gregno);
12460 int align_words = rs6000_parm_start (mode, type, cum->words);
12462 /* _Decimal128 must be passed in an even/odd float register pair.
12463 This assumes that the register number is odd when fregno is odd. */
12464 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12467 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12469 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12472 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12475 /* Do we also need to pass this argument in the parameter
12477 if (type && (cum->nargs_prototype <= 0
12478 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12479 && TARGET_XL_COMPAT
12480 && align_words >= GP_ARG_NUM_REG)))
12481 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12483 /* Describe where this argument goes in the fprs. */
12484 for (i = 0; i < n_elts
12485 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12487 /* Check if the argument is split over registers and memory.
12488 This can only ever happen for long double or _Decimal128;
12489 complex types are handled via split_complex_arg. */
12490 machine_mode fmode = elt_mode;
12491 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12493 gcc_assert (FLOAT128_2REG_P (fmode));
12494 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12497 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12498 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12499 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12502 /* If there were not enough FPRs to hold the argument, the rest
12503 usually goes into memory. However, if the current position
12504 is still within the register parameter area, a portion may
12505 actually have to go into GPRs.
12507 Note that it may happen that the portion of the argument
12508 passed in the first "half" of the first GPR was already
12509 passed in the last FPR as well.
12511 For unnamed arguments, we already set up GPRs to cover the
12512 whole argument in rs6000_psave_function_arg, so there is
12513 nothing further to do at this point. */
12514 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12515 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12516 && cum->nargs_prototype > 0)
12518 static bool warned;
12520 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12521 int n_words = rs6000_arg_size (mode, type);
12523 align_words += fpr_words;
12524 n_words -= fpr_words;
12528 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12529 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12530 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12532 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12534 if (!warned && warn_psabi)
12537 inform (input_location,
12538 "the ABI of passing homogeneous float aggregates"
12539 " has changed in GCC 5");
12543 return rs6000_finish_function_arg (mode, rvec, k);
12545 else if (align_words < GP_ARG_NUM_REG)
12547 if (TARGET_32BIT && TARGET_POWERPC64)
12548 return rs6000_mixed_function_arg (mode, type, align_words);
12550 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12557 /* For an arg passed partly in registers and partly in memory, this is
12558 the number of bytes passed in registers. For args passed entirely in
12559 registers or entirely in memory, zero. When an arg is described by a
12560 PARALLEL, perhaps using more than one register type, this function
12561 returns the number of bytes used by the first element of the PARALLEL. */
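/* Worked example (added for exposition, not taken from the original source):
   on a 64-bit AIX-style target with seven of the eight parameter words
   already consumed (align_words == 7), a 16-byte struct passed in GPRs has
   only one register left for it, so the computation below yields
   (GP_ARG_NUM_REG - align_words) * 8 == 8 bytes in registers; the remaining
   8 bytes of the argument go to the parameter save area.  */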
12564 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12565 tree type, bool named)
12567 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12568 bool passed_in_gprs = true;
12571 machine_mode elt_mode;
12574 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12576 if (DEFAULT_ABI == ABI_V4)
12579 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12581 /* If we are passing this arg in the fixed parameter save area (gprs or
12582 memory) as well as VRs, we do not use the partial bytes mechanism;
12583 instead, rs6000_function_arg will return a PARALLEL including a memory
12584 element as necessary. Library support functions for IEEE 128-bit are
12585 assumed to not need the value passed both in GPRs and in vector
12587 if (TARGET_64BIT && !cum->prototype
12588 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12591 /* Otherwise, we pass in VRs only. Check for partial copies. */
12592 passed_in_gprs = false;
12593 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12594 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12597 /* In this complicated case we just disable the partial_nregs code. */
12598 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12601 align_words = rs6000_parm_start (mode, type, cum->words);
12603 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12605 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12607 /* If we are passing this arg in the fixed parameter save area
12608 (gprs or memory) as well as FPRs, we do not use the partial
12609 bytes mechanism; instead, rs6000_function_arg will return a
12610 PARALLEL including a memory element as necessary. */
12612 && (cum->nargs_prototype <= 0
12613 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12614 && TARGET_XL_COMPAT
12615 && align_words >= GP_ARG_NUM_REG)))
12618 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12619 passed_in_gprs = false;
12620 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12622 /* Compute number of bytes / words passed in FPRs. If there
12623 is still space available in the register parameter area
12624 *after* that amount, a part of the argument will be passed
12625 in GPRs. In that case, the total amount passed in any
12626 registers is equal to the amount that would have been passed
12627 in GPRs if everything were passed there, so we fall back to
12628 the GPR code below to compute the appropriate value. */
12629 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12630 * MIN (8, GET_MODE_SIZE (elt_mode)));
12631 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12633 if (align_words + fpr_words < GP_ARG_NUM_REG)
12634 passed_in_gprs = true;
12641 && align_words < GP_ARG_NUM_REG
12642 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12643 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12645 if (ret != 0 && TARGET_DEBUG_ARG)
12646 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12651 /* A C expression that indicates when an argument must be passed by
12652 reference. If nonzero for an argument, a copy of that argument is
12653 made in memory and a pointer to the argument is passed instead of
12654 the argument itself. The pointer is passed in whatever way is
12655 appropriate for passing a pointer to that type.
12657 Under V.4, aggregates and long double are passed by reference.
12659 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12660 reference unless the AltiVec vector extension ABI is in force.
12662 As an extension to all ABIs, variable sized types are passed by
12666 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12667 machine_mode mode, const_tree type,
12668 bool named ATTRIBUTE_UNUSED)
12673 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12674 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12676 if (TARGET_DEBUG_ARG)
12677 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12681 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12683 if (TARGET_DEBUG_ARG)
12684 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12688 if (int_size_in_bytes (type) < 0)
12690 if (TARGET_DEBUG_ARG)
12691 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12695 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12696 modes only exist for GCC vector types if -maltivec. */
12697 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12699 if (TARGET_DEBUG_ARG)
12700 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12704 /* Pass synthetic vectors in memory. */
12705 if (TREE_CODE (type) == VECTOR_TYPE
12706 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12708 static bool warned_for_pass_big_vectors = false;
12709 if (TARGET_DEBUG_ARG)
12710 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12711 if (!warned_for_pass_big_vectors)
12713 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12714 "non-standard ABI extension with no compatibility "
12716 warned_for_pass_big_vectors = true;
12724 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12725 already processed.  Return true if the parameter must be passed
12726 (fully or partially) on the stack. */
12729 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12735 /* Catch errors. */
12736 if (type == NULL || type == error_mark_node)
12739 /* Handle types with no storage requirement. */
12740 if (TYPE_MODE (type) == VOIDmode)
12743 /* Handle complex types. */
12744 if (TREE_CODE (type) == COMPLEX_TYPE)
12745 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12746 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12748 /* Handle transparent aggregates. */
12749 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12750 && TYPE_TRANSPARENT_AGGR (type))
12751 type = TREE_TYPE (first_field (type));
12753 /* See if this arg was passed by invisible reference. */
12754 if (pass_by_reference (get_cumulative_args (args_so_far),
12755 TYPE_MODE (type), type, true))
12756 type = build_pointer_type (type);
12758 /* Find mode as it is passed by the ABI. */
12759 unsignedp = TYPE_UNSIGNED (type);
12760 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12762 /* If we must pass in stack, we need a stack. */
12763 if (rs6000_must_pass_in_stack (mode, type))
12766 /* If there is no incoming register, we need a stack. */
12767 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12768 if (entry_parm == NULL)
12771 /* Likewise if we need to pass both in registers and on the stack. */
12772 if (GET_CODE (entry_parm) == PARALLEL
12773 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12776 /* Also true if we're partially in registers and partially not. */
12777 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12780 /* Update info on where next arg arrives in registers. */
12781 rs6000_function_arg_advance (args_so_far, mode, type, true);
12785 /* Return true if FUN has no prototype, has a variable argument
12786 list, or passes any parameter in memory. */
12789 rs6000_function_parms_need_stack (tree fun, bool incoming)
12791 tree fntype, result;
12792 CUMULATIVE_ARGS args_so_far_v;
12793 cumulative_args_t args_so_far;
12796 /* Must be a libcall, all of which only use reg parms. */
12801 fntype = TREE_TYPE (fun);
12803 /* Varargs functions need the parameter save area. */
12804 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12807 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12808 args_so_far = pack_cumulative_args (&args_so_far_v);
12810 /* When incoming, we will have been passed the function decl.
12811 It is necessary to use the decl to handle K&R style functions,
12812 where TYPE_ARG_TYPES may not be available. */
12815 gcc_assert (DECL_P (fun));
12816 result = DECL_RESULT (fun);
12819 result = TREE_TYPE (fntype);
12821 if (result && aggregate_value_p (result, fntype))
12823 if (!TYPE_P (result))
12824 result = TREE_TYPE (result);
12825 result = build_pointer_type (result);
12826 rs6000_parm_needs_stack (args_so_far, result);
12833 for (parm = DECL_ARGUMENTS (fun);
12834 parm && parm != void_list_node;
12835 parm = TREE_CHAIN (parm))
12836 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12841 function_args_iterator args_iter;
12844 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12845 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12852 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12853 usually a constant depending on the ABI. However, in the ELFv2 ABI
12854 the register parameter area is optional when calling a function that
12855 has a prototype in scope, has no variable argument list, and passes
12856 all parameters in registers. */
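/* Concrete values (explanatory note, not from the original source): under
   the V.4 ABI this yields 0 bytes, the fixed case in the switch below yields
   64 bytes (32 in 32-bit mode) for the AIX-style ABIs, and for ELFv2 the
   64 bytes are reserved only when rs6000_function_parms_need_stack reports
   that the callee may actually spill parameters there.  */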
12859 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12861 int reg_parm_stack_space;
12863 switch (DEFAULT_ABI)
12866 reg_parm_stack_space = 0;
12871 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12875 /* ??? Recomputing this every time is a bit expensive. Is there
12876 a place to cache this information? */
12877 if (rs6000_function_parms_need_stack (fun, incoming))
12878 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12880 reg_parm_stack_space = 0;
12884 return reg_parm_stack_space;
12888 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12891 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12896 for (i = 0; i < nregs; i++)
12898 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12899 if (reload_completed)
12901 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12904 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12905 i * GET_MODE_SIZE (reg_mode));
12908 tem = replace_equiv_address (tem, XEXP (tem, 0));
12912 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12916 /* Perform any needed actions needed for a function that is receiving a
12917 variable number of arguments.
12921 MODE and TYPE are the mode and type of the current parameter.
12923 PRETEND_SIZE is a variable that should be set to the amount of stack
12924 that must be pushed by the prolog to pretend that our caller pushed
12927 Normally, this macro will push all remaining incoming registers on the
12928 stack and set PRETEND_SIZE to the length of the registers pushed. */
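/* Layout sketch of the V.4 varargs register save area built below (added
   for exposition, not taken from the original source):

       offset 0                         r3 .. r10 (GP_ARG_NUM_REG * reg_size bytes)
       offset GP_ARG_NUM_REG * reg_size f1 .. f8  (UNITS_PER_FP_WORD bytes each)

   trimmed according to cfun->va_list_gpr_size / va_list_fpr_size so that only
   the registers a va_arg expansion can actually read are dumped.  */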
12931 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12932 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12935 CUMULATIVE_ARGS next_cum;
12936 int reg_size = TARGET_32BIT ? 4 : 8;
12937 rtx save_area = NULL_RTX, mem;
12938 int first_reg_offset;
12939 alias_set_type set;
12941 /* Skip the last named argument. */
12942 next_cum = *get_cumulative_args (cum);
12943 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12945 if (DEFAULT_ABI == ABI_V4)
12947 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12951 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12952 HOST_WIDE_INT offset = 0;
12954 /* Try to optimize the size of the varargs save area.
12955 The ABI requires that ap.reg_save_area is doubleword
12956 aligned, but we don't need to allocate space for all
12957 the bytes, only those to which we actually will save
12959 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12960 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12961 if (TARGET_HARD_FLOAT
12962 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12963 && cfun->va_list_fpr_size)
12966 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12967 * UNITS_PER_FP_WORD;
12968 if (cfun->va_list_fpr_size
12969 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12970 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12972 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12973 * UNITS_PER_FP_WORD;
12977 offset = -((first_reg_offset * reg_size) & ~7);
12978 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12980 gpr_reg_num = cfun->va_list_gpr_size;
12981 if (reg_size == 4 && (first_reg_offset & 1))
12984 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12987 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12988 * UNITS_PER_FP_WORD
12989 - (int) (GP_ARG_NUM_REG * reg_size);
12991 if (gpr_size + fpr_size)
12994 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12995 gcc_assert (GET_CODE (reg_save_area) == MEM);
12996 reg_save_area = XEXP (reg_save_area, 0);
12997 if (GET_CODE (reg_save_area) == PLUS)
12999 gcc_assert (XEXP (reg_save_area, 0)
13000 == virtual_stack_vars_rtx);
13001 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13002 offset += INTVAL (XEXP (reg_save_area, 1));
13005 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13008 cfun->machine->varargs_save_offset = offset;
13009 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13014 first_reg_offset = next_cum.words;
13015 save_area = crtl->args.internal_arg_pointer;
13017 if (targetm.calls.must_pass_in_stack (mode, type))
13018 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13021 set = get_varargs_alias_set ();
13022 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13023 && cfun->va_list_gpr_size)
13025 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13027 if (va_list_gpr_counter_field)
13028 /* V4 va_list_gpr_size counts number of registers needed. */
13029 n_gpr = cfun->va_list_gpr_size;
13031 /* char * va_list instead counts number of bytes needed. */
13032 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13037 mem = gen_rtx_MEM (BLKmode,
13038 plus_constant (Pmode, save_area,
13039 first_reg_offset * reg_size));
13040 MEM_NOTRAP_P (mem) = 1;
13041 set_mem_alias_set (mem, set);
13042 set_mem_align (mem, BITS_PER_WORD);
13044 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13048 /* Save FP registers if needed. */
13049 if (DEFAULT_ABI == ABI_V4
13050 && TARGET_HARD_FLOAT
13052 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13053 && cfun->va_list_fpr_size)
13055 int fregno = next_cum.fregno, nregs;
13056 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13057 rtx lab = gen_label_rtx ();
13058 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13059 * UNITS_PER_FP_WORD);
13062 (gen_rtx_SET (pc_rtx,
13063 gen_rtx_IF_THEN_ELSE (VOIDmode,
13064 gen_rtx_NE (VOIDmode, cr1,
13066 gen_rtx_LABEL_REF (VOIDmode, lab),
13070 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13071 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13073 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13075 plus_constant (Pmode, save_area, off));
13076 MEM_NOTRAP_P (mem) = 1;
13077 set_mem_alias_set (mem, set);
13078 set_mem_align (mem, GET_MODE_ALIGNMENT (
13079 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13080 ? DFmode : SFmode));
13081 emit_move_insn (mem, gen_rtx_REG (
13082 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13083 ? DFmode : SFmode, fregno));
13090 /* Create the va_list data type. */
13093 rs6000_build_builtin_va_list (void)
13095 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13097 /* For AIX, prefer 'char *' because that's what the system
13098 header files like. */
13099 if (DEFAULT_ABI != ABI_V4)
13100 return build_pointer_type (char_type_node);
13102 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13103 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13104 get_identifier ("__va_list_tag"), record);
13106 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13107 unsigned_char_type_node);
13108 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13109 unsigned_char_type_node);
13110 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13111 every user file. */
13112 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13113 get_identifier ("reserved"), short_unsigned_type_node);
13114 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13115 get_identifier ("overflow_arg_area"),
13117 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13118 get_identifier ("reg_save_area"),
13121 va_list_gpr_counter_field = f_gpr;
13122 va_list_fpr_counter_field = f_fpr;
13124 DECL_FIELD_CONTEXT (f_gpr) = record;
13125 DECL_FIELD_CONTEXT (f_fpr) = record;
13126 DECL_FIELD_CONTEXT (f_res) = record;
13127 DECL_FIELD_CONTEXT (f_ovf) = record;
13128 DECL_FIELD_CONTEXT (f_sav) = record;
13130 TYPE_STUB_DECL (record) = type_decl;
13131 TYPE_NAME (record) = type_decl;
13132 TYPE_FIELDS (record) = f_gpr;
13133 DECL_CHAIN (f_gpr) = f_fpr;
13134 DECL_CHAIN (f_fpr) = f_res;
13135 DECL_CHAIN (f_res) = f_ovf;
13136 DECL_CHAIN (f_ovf) = f_sav;
13138 layout_type (record);
13140 /* The correct type is an array type of one element. */
13141 return build_array_type (record, build_index_type (size_zero_node));
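/* For reference (explanatory sketch, not from the original source), the
   record built above corresponds to the familiar V.4 va_list layout:

       struct __va_list_tag {
         unsigned char gpr;          -- GP argument registers already consumed
         unsigned char fpr;          -- FP argument registers already consumed
         unsigned short reserved;    -- padding, named to appease -Wpadded
         void *overflow_arg_area;    -- next argument passed on the stack
         void *reg_save_area;        -- block written by setup_incoming_varargs
       };
       typedef struct __va_list_tag va_list[1];
*/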
13144 /* Implement va_start. */
13147 rs6000_va_start (tree valist, rtx nextarg)
13149 HOST_WIDE_INT words, n_gpr, n_fpr;
13150 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13151 tree gpr, fpr, ovf, sav, t;
13153 /* Only SVR4 needs something special. */
13154 if (DEFAULT_ABI != ABI_V4)
13156 std_expand_builtin_va_start (valist, nextarg);
13160 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13161 f_fpr = DECL_CHAIN (f_gpr);
13162 f_res = DECL_CHAIN (f_fpr);
13163 f_ovf = DECL_CHAIN (f_res);
13164 f_sav = DECL_CHAIN (f_ovf);
13166 valist = build_simple_mem_ref (valist);
13167 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13168 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13170 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13172 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13175 /* Count number of gp and fp argument registers used. */
13176 words = crtl->args.info.words;
13177 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13179 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13182 if (TARGET_DEBUG_ARG)
13183 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC ", n_gpr = "
13184 HOST_WIDE_INT_PRINT_DEC ", n_fpr = " HOST_WIDE_INT_PRINT_DEC "\n",
13185 words, n_gpr, n_fpr);
13187 if (cfun->va_list_gpr_size)
13189 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13190 build_int_cst (NULL_TREE, n_gpr));
13191 TREE_SIDE_EFFECTS (t) = 1;
13192 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13195 if (cfun->va_list_fpr_size)
13197 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13198 build_int_cst (NULL_TREE, n_fpr));
13199 TREE_SIDE_EFFECTS (t) = 1;
13200 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13202 #ifdef HAVE_AS_GNU_ATTRIBUTE
13203 if (call_ABI_of_interest (cfun->decl))
13204 rs6000_passes_float = true;
13208 /* Find the overflow area. */
13209 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13211 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13212 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13213 TREE_SIDE_EFFECTS (t) = 1;
13214 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13216 /* If there were no va_arg invocations, don't set up the register
13218 if (!cfun->va_list_gpr_size
13219 && !cfun->va_list_fpr_size
13220 && n_gpr < GP_ARG_NUM_REG
13221 && n_fpr < FP_ARG_V4_MAX_REG)
13224 /* Find the register save area. */
13225 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13226 if (cfun->machine->varargs_save_offset)
13227 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13228 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13229 TREE_SIDE_EFFECTS (t) = 1;
13230 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13233 /* Implement va_arg. */
13236 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13237 gimple_seq *post_p)
13239 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13240 tree gpr, fpr, ovf, sav, reg, t, u;
13241 int size, rsize, n_reg, sav_ofs, sav_scale;
13242 tree lab_false, lab_over, addr;
13244 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13248 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13250 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13251 return build_va_arg_indirect_ref (t);
13254 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13255 earlier version of gcc, with the property that it always applied alignment
13256 adjustments to the va-args (even for zero-sized types). The cheapest way
13257 to deal with this is to replicate the effect of the part of
13258 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13260 We don't need to check for pass-by-reference because of the test above.
13261 We can return a simplified answer, since we know there's no offset to add. */
13264 && rs6000_darwin64_abi)
13265 || DEFAULT_ABI == ABI_ELFv2
13266 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13267 && integer_zerop (TYPE_SIZE (type)))
13269 unsigned HOST_WIDE_INT align, boundary;
13270 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13271 align = PARM_BOUNDARY / BITS_PER_UNIT;
13272 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13273 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13274 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13275 boundary /= BITS_PER_UNIT;
13276 if (boundary > align)
13279 /* This updates arg ptr by the amount that would be necessary
13280 to align the zero-sized (but not zero-alignment) item. */
13281 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13282 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13283 gimplify_and_add (t, pre_p);
13285 t = fold_convert (sizetype, valist_tmp);
13286 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13287 fold_convert (TREE_TYPE (valist),
13288 fold_build2 (BIT_AND_EXPR, sizetype, t,
13289 size_int (-boundary))));
13290 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13291 gimplify_and_add (t, pre_p);
13293 /* Since it is zero-sized there's no increment for the item itself. */
13294 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13295 return build_va_arg_indirect_ref (valist_tmp);
13298 if (DEFAULT_ABI != ABI_V4)
13300 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13302 tree elem_type = TREE_TYPE (type);
13303 machine_mode elem_mode = TYPE_MODE (elem_type);
13304 int elem_size = GET_MODE_SIZE (elem_mode);
13306 if (elem_size < UNITS_PER_WORD)
13308 tree real_part, imag_part;
13309 gimple_seq post = NULL;
13311 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13313 /* Copy the value into a temporary, lest the formal temporary
13314 be reused out from under us. */
13315 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13316 gimple_seq_add_seq (pre_p, post);
13318 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13321 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13325 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13328 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13329 f_fpr = DECL_CHAIN (f_gpr);
13330 f_res = DECL_CHAIN (f_fpr);
13331 f_ovf = DECL_CHAIN (f_res);
13332 f_sav = DECL_CHAIN (f_ovf);
13334 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13335 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13337 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13339 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13342 size = int_size_in_bytes (type);
13343 rsize = (size + 3) / 4;
13344 int pad = 4 * rsize - size;
13347 machine_mode mode = TYPE_MODE (type);
13348 if (abi_v4_pass_in_fpr (mode))
13350 /* FP args go in FP registers, if present. */
13352 n_reg = (size + 7) / 8;
13353 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13354 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13355 if (mode != SFmode && mode != SDmode)
13360 /* Otherwise into GP registers. */
13369 /* Pull the value out of the saved registers.... */
13372 addr = create_tmp_var (ptr_type_node, "addr");
13374 /* AltiVec vectors never go in registers when -mabi=altivec. */
13375 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13379 lab_false = create_artificial_label (input_location);
13380 lab_over = create_artificial_label (input_location);
13382 /* Long long is aligned in the registers. As are any other 2 gpr
13383 item such as complex int due to a historical mistake. */
13385 if (n_reg == 2 && reg == gpr)
13388 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13389 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13390 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13391 unshare_expr (reg), u);
13393 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13394 reg number is 0 for f1, so we want to make it odd. */
13395 else if (reg == fpr && mode == TDmode)
13397 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13398 build_int_cst (TREE_TYPE (reg), 1));
13399 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13402 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13403 t = build2 (GE_EXPR, boolean_type_node, u, t);
13404 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13405 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13406 gimplify_and_add (t, pre_p);
13410 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13412 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13413 build_int_cst (TREE_TYPE (reg), n_reg));
13414 u = fold_convert (sizetype, u);
13415 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13416 t = fold_build_pointer_plus (t, u);
13418 /* _Decimal32 varargs are located in the second word of the 64-bit
13419 FP register for 32-bit binaries. */
13420 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13421 t = fold_build_pointer_plus_hwi (t, size);
13423 /* Args are passed right-aligned. */
13424 if (BYTES_BIG_ENDIAN)
13425 t = fold_build_pointer_plus_hwi (t, pad);
13427 gimplify_assign (addr, t, pre_p);
13429 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13431 stmt = gimple_build_label (lab_false);
13432 gimple_seq_add_stmt (pre_p, stmt);
13434 if ((n_reg == 2 && !regalign) || n_reg > 2)
13436 /* Ensure that we don't find any more args in regs.
13437 Alignment has taken care of for special cases. */
13438 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13442 /* ... otherwise out of the overflow area. */
13444 /* Care for on-stack alignment if needed. */
13448 t = fold_build_pointer_plus_hwi (t, align - 1);
13449 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13450 build_int_cst (TREE_TYPE (t), -align));
13453 /* Args are passed right-aligned. */
13454 if (BYTES_BIG_ENDIAN)
13455 t = fold_build_pointer_plus_hwi (t, pad);
13457 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13459 gimplify_assign (unshare_expr (addr), t, pre_p);
13461 t = fold_build_pointer_plus_hwi (t, size);
13462 gimplify_assign (unshare_expr (ovf), t, pre_p);
13466 stmt = gimple_build_label (lab_over);
13467 gimple_seq_add_stmt (pre_p, stmt);
13470 if (STRICT_ALIGNMENT
13471 && (TYPE_ALIGN (type)
13472 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13474 /* The value (of type complex double, for example) may not be
13475 aligned in memory in the saved registers, so copy via a
13476 temporary. (This is the same code as used for SPARC.) */
13477 tree tmp = create_tmp_var (type, "va_arg_tmp");
13478 tree dest_addr = build_fold_addr_expr (tmp);
13480 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13481 3, dest_addr, addr, size_int (rsize * 4));
13483 gimplify_and_add (copy, pre_p);
13487 addr = fold_convert (ptrtype, addr);
13488 return build_va_arg_indirect_ref (addr);
13494 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13497 unsigned classify = rs6000_builtin_info[(int)code].attr;
13498 const char *attr_string = "";
13500 gcc_assert (name != NULL);
13501 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13503 if (rs6000_builtin_decls[(int)code])
13504 fatal_error (input_location,
13505 "internal error: builtin function %qs already processed",
13508 rs6000_builtin_decls[(int)code] = t =
13509 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13511 /* Set any special attributes. */
13512 if ((classify & RS6000_BTC_CONST) != 0)
13514 /* const function, function only depends on the inputs. */
13515 TREE_READONLY (t) = 1;
13516 TREE_NOTHROW (t) = 1;
13517 attr_string = ", const";
13519 else if ((classify & RS6000_BTC_PURE) != 0)
13521 /* pure function, function can read global memory, but does not set any
13523 DECL_PURE_P (t) = 1;
13524 TREE_NOTHROW (t) = 1;
13525 attr_string = ", pure";
13527 else if ((classify & RS6000_BTC_FP) != 0)
13529 /* Function is a math function. If rounding mode is on, then treat the
13530 function as not reading global memory, but it can have arbitrary side
13531 effects. If it is off, then assume the function is a const function.
13532 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13533 builtin-attribute.def that is used for the math functions. */
13534 TREE_NOTHROW (t) = 1;
13535 if (flag_rounding_math)
13537 DECL_PURE_P (t) = 1;
13538 DECL_IS_NOVOPS (t) = 1;
13539 attr_string = ", fp, pure";
13543 TREE_READONLY (t) = 1;
13544 attr_string = ", fp, const";
13547 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13548 gcc_unreachable ();
13550 if (TARGET_DEBUG_BUILTIN)
13551 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13552 (int)code, name, attr_string);
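/* Hypothetical usage sketch (added for exposition; the real calls are made
   from the builtin-initialization code, largely by walking the bdesc_*
   tables below):

       def_builtin ("__builtin_altivec_vaddfp", v4sf_ftype_v4sf_v4sf,
                    ALTIVEC_BUILTIN_VADDFP);

   registers the builtin with the middle end via add_builtin_function and
   records the decl in rs6000_builtin_decls[] for later expansion.  */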
13555 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13557 #undef RS6000_BUILTIN_0
13558 #undef RS6000_BUILTIN_1
13559 #undef RS6000_BUILTIN_2
13560 #undef RS6000_BUILTIN_3
13561 #undef RS6000_BUILTIN_A
13562 #undef RS6000_BUILTIN_D
13563 #undef RS6000_BUILTIN_H
13564 #undef RS6000_BUILTIN_P
13565 #undef RS6000_BUILTIN_Q
13566 #undef RS6000_BUILTIN_X
13568 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13569 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13570 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13571 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13572 { MASK, ICODE, NAME, ENUM },
13574 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13575 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13576 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13577 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13578 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13579 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13581 static const struct builtin_description bdesc_3arg[] =
13583 #include "rs6000-builtin.def"
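/* How this table gets its contents (explanatory note, not from the original
   source): with RS6000_BUILTIN_3 defined just above to emit an initializer
   and every other RS6000_BUILTIN_* variant defined to nothing, re-including
   rs6000-builtin.def expands only the three-operand entries; each one, e.g.
   a hypothetical

       RS6000_BUILTIN_3 (ENUM, NAME, MASK, ATTR, ICODE)

   becomes one { MASK, ICODE, NAME, ENUM } row of bdesc_3arg.  The same
   #undef/#define dance is repeated below for the other operand counts.  */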
13586 /* DST operations: void foo (void *, const int, const char). */
13588 #undef RS6000_BUILTIN_0
13589 #undef RS6000_BUILTIN_1
13590 #undef RS6000_BUILTIN_2
13591 #undef RS6000_BUILTIN_3
13592 #undef RS6000_BUILTIN_A
13593 #undef RS6000_BUILTIN_D
13594 #undef RS6000_BUILTIN_H
13595 #undef RS6000_BUILTIN_P
13596 #undef RS6000_BUILTIN_Q
13597 #undef RS6000_BUILTIN_X
13599 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13600 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13601 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13602 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13603 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13604 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13605 { MASK, ICODE, NAME, ENUM },
13607 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13608 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13609 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13610 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13612 static const struct builtin_description bdesc_dst[] =
13614 #include "rs6000-builtin.def"
13617 /* Simple binary operations: VECc = foo (VECa, VECb). */
13619 #undef RS6000_BUILTIN_0
13620 #undef RS6000_BUILTIN_1
13621 #undef RS6000_BUILTIN_2
13622 #undef RS6000_BUILTIN_3
13623 #undef RS6000_BUILTIN_A
13624 #undef RS6000_BUILTIN_D
13625 #undef RS6000_BUILTIN_H
13626 #undef RS6000_BUILTIN_P
13627 #undef RS6000_BUILTIN_Q
13628 #undef RS6000_BUILTIN_X
13630 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13631 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13632 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13633 { MASK, ICODE, NAME, ENUM },
13635 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13636 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13637 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13638 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13639 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13640 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13641 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13643 static const struct builtin_description bdesc_2arg[] =
13645 #include "rs6000-builtin.def"
13648 #undef RS6000_BUILTIN_0
13649 #undef RS6000_BUILTIN_1
13650 #undef RS6000_BUILTIN_2
13651 #undef RS6000_BUILTIN_3
13652 #undef RS6000_BUILTIN_A
13653 #undef RS6000_BUILTIN_D
13654 #undef RS6000_BUILTIN_H
13655 #undef RS6000_BUILTIN_P
13656 #undef RS6000_BUILTIN_Q
13657 #undef RS6000_BUILTIN_X
13659 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13660 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13661 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13663 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13664 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13665 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13666 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13667 { MASK, ICODE, NAME, ENUM },
13669 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13670 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13672 /* AltiVec predicates. */
13674 static const struct builtin_description bdesc_altivec_preds[] =
13676 #include "rs6000-builtin.def"
13679 /* PAIRED predicates. */
13680 #undef RS6000_BUILTIN_0
13681 #undef RS6000_BUILTIN_1
13682 #undef RS6000_BUILTIN_2
13683 #undef RS6000_BUILTIN_3
13684 #undef RS6000_BUILTIN_A
13685 #undef RS6000_BUILTIN_D
13686 #undef RS6000_BUILTIN_H
13687 #undef RS6000_BUILTIN_P
13688 #undef RS6000_BUILTIN_Q
13689 #undef RS6000_BUILTIN_X
13691 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13692 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13693 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13695 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13696 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13697 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13698 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13699 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13700 { MASK, ICODE, NAME, ENUM },
13702 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13704 static const struct builtin_description bdesc_paired_preds[] =
13706 #include "rs6000-builtin.def"
13709 /* ABS* operations. */
13711 #undef RS6000_BUILTIN_0
13712 #undef RS6000_BUILTIN_1
13713 #undef RS6000_BUILTIN_2
13714 #undef RS6000_BUILTIN_3
13715 #undef RS6000_BUILTIN_A
13716 #undef RS6000_BUILTIN_D
13717 #undef RS6000_BUILTIN_H
13718 #undef RS6000_BUILTIN_P
13719 #undef RS6000_BUILTIN_Q
13720 #undef RS6000_BUILTIN_X
13722 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13725 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13726 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13727 { MASK, ICODE, NAME, ENUM },
13729 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13730 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13731 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13732 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13733 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13735 static const struct builtin_description bdesc_abs[] =
13737 #include "rs6000-builtin.def"
13740 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13743 #undef RS6000_BUILTIN_0
13744 #undef RS6000_BUILTIN_1
13745 #undef RS6000_BUILTIN_2
13746 #undef RS6000_BUILTIN_3
13747 #undef RS6000_BUILTIN_A
13748 #undef RS6000_BUILTIN_D
13749 #undef RS6000_BUILTIN_H
13750 #undef RS6000_BUILTIN_P
13751 #undef RS6000_BUILTIN_Q
13752 #undef RS6000_BUILTIN_X
13754 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13756 { MASK, ICODE, NAME, ENUM },
13758 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13759 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13760 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13761 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13762 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13763 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13764 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13765 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13767 static const struct builtin_description bdesc_1arg[] =
13769 #include "rs6000-builtin.def"
13772 /* Simple no-argument operations: result = __builtin_darn_32 () */
13774 #undef RS6000_BUILTIN_0
13775 #undef RS6000_BUILTIN_1
13776 #undef RS6000_BUILTIN_2
13777 #undef RS6000_BUILTIN_3
13778 #undef RS6000_BUILTIN_A
13779 #undef RS6000_BUILTIN_D
13780 #undef RS6000_BUILTIN_H
13781 #undef RS6000_BUILTIN_P
13782 #undef RS6000_BUILTIN_Q
13783 #undef RS6000_BUILTIN_X
13785 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13786 { MASK, ICODE, NAME, ENUM },
13788 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13789 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13790 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13792 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13793 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13794 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13795 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13796 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13798 static const struct builtin_description bdesc_0arg[] =
13800 #include "rs6000-builtin.def"
13803 /* HTM builtins. */
13804 #undef RS6000_BUILTIN_0
13805 #undef RS6000_BUILTIN_1
13806 #undef RS6000_BUILTIN_2
13807 #undef RS6000_BUILTIN_3
13808 #undef RS6000_BUILTIN_A
13809 #undef RS6000_BUILTIN_D
13810 #undef RS6000_BUILTIN_H
13811 #undef RS6000_BUILTIN_P
13812 #undef RS6000_BUILTIN_Q
13813 #undef RS6000_BUILTIN_X
13815 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13816 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13817 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13818 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13819 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13821 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13822 { MASK, ICODE, NAME, ENUM },
13824 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13825 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13826 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13828 static const struct builtin_description bdesc_htm[] =
13830 #include "rs6000-builtin.def"
13833 #undef RS6000_BUILTIN_0
13834 #undef RS6000_BUILTIN_1
13835 #undef RS6000_BUILTIN_2
13836 #undef RS6000_BUILTIN_3
13837 #undef RS6000_BUILTIN_A
13838 #undef RS6000_BUILTIN_D
13839 #undef RS6000_BUILTIN_H
13840 #undef RS6000_BUILTIN_P
13841 #undef RS6000_BUILTIN_Q
13843 /* Return true if a builtin function is overloaded. */
13845 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13847 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13851 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13853 return rs6000_builtin_info[(int)fncode].name;
13856 /* Expand an expression EXP that calls a builtin without arguments. */
13858 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13861 machine_mode tmode = insn_data[icode].operand[0].mode;
13863 if (icode == CODE_FOR_nothing)
13864 /* Builtin not supported on this processor. */
13868 || GET_MODE (target) != tmode
13869 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13870 target = gen_reg_rtx (tmode);
13872 pat = GEN_FCN (icode) (target);
13882 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13885 tree arg0 = CALL_EXPR_ARG (exp, 0);
13886 tree arg1 = CALL_EXPR_ARG (exp, 1);
13887 rtx op0 = expand_normal (arg0);
13888 rtx op1 = expand_normal (arg1);
13889 machine_mode mode0 = insn_data[icode].operand[0].mode;
13890 machine_mode mode1 = insn_data[icode].operand[1].mode;
13892 if (icode == CODE_FOR_nothing)
13893 /* Builtin not supported on this processor. */
13896 /* If we got invalid arguments bail out before generating bad rtl. */
13897 if (arg0 == error_mark_node || arg1 == error_mark_node)
13900 if (GET_CODE (op0) != CONST_INT
13901 || INTVAL (op0) > 255
13902 || INTVAL (op0) < 0)
13904 error ("argument 1 must be an 8-bit field value");
13908 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13909 op0 = copy_to_mode_reg (mode0, op0);
13911 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13912 op1 = copy_to_mode_reg (mode1, op1);
13914 pat = GEN_FCN (icode) (op0, op1);
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return 0;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (!pat)
    return 0;
  emit_insn (pat);

  return target;
}
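/* Illustrative sketch, guarded out and not used by GCC: the literal checks in
   the expanders above all reduce to a signed or unsigned range test on the
   constant operand before any RTL is generated.  The helpers below are
   hypothetical stand-ins showing the arithmetic for an N-bit field; the real
   code compares against explicit bounds such as -16..15 for the 5-bit
   vspltis* immediates.  */
#if 0
#include <stdbool.h>

/* True if VALUE fits in an N-bit two's-complement signed field.  */
static bool
fits_signed_nbit (long long value, unsigned n)
{
  long long lo = -(1LL << (n - 1));	/* e.g. -16 for n == 5 */
  long long hi = (1LL << (n - 1)) - 1;	/* e.g. +15 for n == 5 */
  return value >= lo && value <= hi;
}

/* True if VALUE fits in an N-bit unsigned field.  */
static bool
fits_unsigned_nbit (unsigned long long value, unsigned n)
{
  return (value >> n) == 0;		/* e.g. value <= 31 for n == 5 */
}
#endif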
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (!pat)
    return 0;
  emit_insn (pat);

  return target;
}
14002 rs6000_expand_binop_builtin (enum insn_code icode
, tree exp
, rtx target
)
14005 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14006 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14007 rtx op0
= expand_normal (arg0
);
14008 rtx op1
= expand_normal (arg1
);
14009 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14010 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14011 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14013 if (icode
== CODE_FOR_nothing
)
14014 /* Builtin not supported on this processor. */
14017 /* If we got invalid arguments bail out before generating bad rtl. */
14018 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14021 if (icode
== CODE_FOR_altivec_vcfux
14022 || icode
== CODE_FOR_altivec_vcfsx
14023 || icode
== CODE_FOR_altivec_vctsxs
14024 || icode
== CODE_FOR_altivec_vctuxs
14025 || icode
== CODE_FOR_altivec_vspltb
14026 || icode
== CODE_FOR_altivec_vsplth
14027 || icode
== CODE_FOR_altivec_vspltw
)
14029 /* Only allow 5-bit unsigned literals. */
14031 if (TREE_CODE (arg1
) != INTEGER_CST
14032 || TREE_INT_CST_LOW (arg1
) & ~0x1f)
14034 error ("argument 2 must be a 5-bit unsigned literal");
14035 return CONST0_RTX (tmode
);
14038 else if (icode
== CODE_FOR_dfptstsfi_eq_dd
14039 || icode
== CODE_FOR_dfptstsfi_lt_dd
14040 || icode
== CODE_FOR_dfptstsfi_gt_dd
14041 || icode
== CODE_FOR_dfptstsfi_unordered_dd
14042 || icode
== CODE_FOR_dfptstsfi_eq_td
14043 || icode
== CODE_FOR_dfptstsfi_lt_td
14044 || icode
== CODE_FOR_dfptstsfi_gt_td
14045 || icode
== CODE_FOR_dfptstsfi_unordered_td
)
14047 /* Only allow 6-bit unsigned literals. */
14049 if (TREE_CODE (arg0
) != INTEGER_CST
14050 || !IN_RANGE (TREE_INT_CST_LOW (arg0
), 0, 63))
14052 error ("argument 1 must be a 6-bit unsigned literal");
14053 return CONST0_RTX (tmode
);
14056 else if (icode
== CODE_FOR_xststdcqp
14057 || icode
== CODE_FOR_xststdcdp
14058 || icode
== CODE_FOR_xststdcsp
14059 || icode
== CODE_FOR_xvtstdcdp
14060 || icode
== CODE_FOR_xvtstdcsp
)
14062 /* Only allow 7-bit unsigned literals. */
14064 if (TREE_CODE (arg1
) != INTEGER_CST
14065 || !IN_RANGE (TREE_INT_CST_LOW (arg1
), 0, 127))
14067 error ("argument 2 must be a 7-bit unsigned literal");
14068 return CONST0_RTX (tmode
);
14071 else if (icode
== CODE_FOR_unpackv1ti
14072 || icode
== CODE_FOR_unpackkf
14073 || icode
== CODE_FOR_unpacktf
14074 || icode
== CODE_FOR_unpackif
14075 || icode
== CODE_FOR_unpacktd
)
14077 /* Only allow 1-bit unsigned literals. */
14079 if (TREE_CODE (arg1
) != INTEGER_CST
14080 || !IN_RANGE (TREE_INT_CST_LOW (arg1
), 0, 1))
14082 error ("argument 2 must be a 1-bit unsigned literal");
14083 return CONST0_RTX (tmode
);
14088 || GET_MODE (target
) != tmode
14089 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14090 target
= gen_reg_rtx (tmode
);
14092 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14093 op0
= copy_to_mode_reg (mode0
, op0
);
14094 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14095 op1
= copy_to_mode_reg (mode1
, op1
);
14097 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
14106 altivec_expand_predicate_builtin (enum insn_code icode
, tree exp
, rtx target
)
14109 tree cr6_form
= CALL_EXPR_ARG (exp
, 0);
14110 tree arg0
= CALL_EXPR_ARG (exp
, 1);
14111 tree arg1
= CALL_EXPR_ARG (exp
, 2);
14112 rtx op0
= expand_normal (arg0
);
14113 rtx op1
= expand_normal (arg1
);
14114 machine_mode tmode
= SImode
;
14115 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14116 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14119 if (TREE_CODE (cr6_form
) != INTEGER_CST
)
14121 error ("argument 1 of %qs must be a constant",
14122 "__builtin_altivec_predicate");
14126 cr6_form_int
= TREE_INT_CST_LOW (cr6_form
);
14128 gcc_assert (mode0
== mode1
);
14130 /* If we have invalid arguments, bail out before generating bad rtl. */
14131 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14135 || GET_MODE (target
) != tmode
14136 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14137 target
= gen_reg_rtx (tmode
);
14139 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14140 op0
= copy_to_mode_reg (mode0
, op0
);
14141 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14142 op1
= copy_to_mode_reg (mode1
, op1
);
14144 /* Note that for many of the relevant operations (e.g. cmpne or
14145 cmpeq) with float or double operands, it makes more sense for the
14146 mode of the allocated scratch register to select a vector of
14147 integer. But the choice to copy the mode of operand 0 was made
14148 long ago and there are no plans to change it. */
14149 scratch
= gen_reg_rtx (mode0
);
14151 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
14156 /* The vec_any* and vec_all* predicates use the same opcodes for two
14157 different operations, but the bits in CR6 will be different
14158 depending on what information we want. So we have to play tricks
14159 with CR6 to get the right bits out.
14161 If you think this is disgusting, look at the specs for the
14162 AltiVec predicates. */
14164 switch (cr6_form_int
)
14167 emit_insn (gen_cr6_test_for_zero (target
));
14170 emit_insn (gen_cr6_test_for_zero_reverse (target
));
14173 emit_insn (gen_cr6_test_for_lt (target
));
14176 emit_insn (gen_cr6_test_for_lt_reverse (target
));
14179 error ("argument 1 of %qs is out of range",
14180 "__builtin_altivec_predicate");
14188 paired_expand_lv_builtin (enum insn_code icode
, tree exp
, rtx target
)
14191 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14192 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14193 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14194 machine_mode mode0
= Pmode
;
14195 machine_mode mode1
= Pmode
;
14196 rtx op0
= expand_normal (arg0
);
14197 rtx op1
= expand_normal (arg1
);
14199 if (icode
== CODE_FOR_nothing
)
14200 /* Builtin not supported on this processor. */
14203 /* If we got invalid arguments bail out before generating bad rtl. */
14204 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14208 || GET_MODE (target
) != tmode
14209 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14210 target
= gen_reg_rtx (tmode
);
14212 op1
= copy_to_mode_reg (mode1
, op1
);
14214 if (op0
== const0_rtx
)
14216 addr
= gen_rtx_MEM (tmode
, op1
);
14220 op0
= copy_to_mode_reg (mode0
, op0
);
14221 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op0
, op1
));
14224 pat
= GEN_FCN (icode
) (target
, addr
);
/* Return a constant vector for use as a little-endian permute control vector
   to reverse the order of elements of the given vector mode.  */
static rtx
swap_selector_for_mode (machine_mode mode)
{
  /* These are little endian vectors, so their elements are reversed
     from what you would normally expect for a permute control vector.  */
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V2DFmode:
    case E_V2DImode:
      swaparray = swap2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      swaparray = swap4;
      break;
    case E_V8HImode:
      swaparray = swap8;
      break;
    case E_V16QImode:
      swaparray = swap16;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
						      gen_rtvec_v (16, perm)));
}
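/* Illustrative sketch, guarded out: each swap* table above simply reverses
   the byte order within every element of the corresponding width; combined
   with the big-endian byte numbering that vperm applies to a register holding
   a little-endian vector, the net effect is to reorder the elements
   themselves.  The helper below is hypothetical and just regenerates any of
   the four tables from the element size.  */
#if 0
#include <assert.h>

/* Fill SEL[0..15] with the selector for ELT_SIZE-byte elements
   (elt_size of 8, 4, 2 or 1 reproduces swap2, swap4, swap8, swap16).  */
static void
build_swap_selector (unsigned int sel[16], unsigned int elt_size)
{
  for (unsigned int i = 0; i < 16; i++)
    sel[i] = (i / elt_size) * elt_size + (elt_size - 1 - i % elt_size);
}

static void
check_swap_selector (void)
{
  unsigned int sel[16];
  build_swap_selector (sel, 8);
  assert (sel[0] == 7 && sel[8] == 15);	/* matches swap2 above */
}
#endif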
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
   with -maltivec=be specified.  Issue the load followed by an element-
   reversing permute.  */
void
altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx load = gen_rtx_SET (tmp, op1);
  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);

  gcc_assert (REG_P (op0));
  emit_insn (par);
  emit_insn (gen_rtx_SET (op0, vperm));
}
/* Generate code for a "stvxl" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx store = gen_rtx_SET (op0, tmp);
  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx tmp = gen_reg_rtx (mode);
  rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (gen_rtx_SET (op0, stvx));
}
14328 altivec_expand_lv_builtin (enum insn_code icode
, tree exp
, rtx target
, bool blk
)
14331 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14332 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14333 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14334 machine_mode mode0
= Pmode
;
14335 machine_mode mode1
= Pmode
;
14336 rtx op0
= expand_normal (arg0
);
14337 rtx op1
= expand_normal (arg1
);
14339 if (icode
== CODE_FOR_nothing
)
14340 /* Builtin not supported on this processor. */
14343 /* If we got invalid arguments bail out before generating bad rtl. */
14344 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14348 || GET_MODE (target
) != tmode
14349 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14350 target
= gen_reg_rtx (tmode
);
14352 op1
= copy_to_mode_reg (mode1
, op1
);
14354 /* For LVX, express the RTL accurately by ANDing the address with -16.
14355 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14356 so the raw address is fine. */
14357 if (icode
== CODE_FOR_altivec_lvx_v2df_2op
14358 || icode
== CODE_FOR_altivec_lvx_v2di_2op
14359 || icode
== CODE_FOR_altivec_lvx_v4sf_2op
14360 || icode
== CODE_FOR_altivec_lvx_v4si_2op
14361 || icode
== CODE_FOR_altivec_lvx_v8hi_2op
14362 || icode
== CODE_FOR_altivec_lvx_v16qi_2op
)
14365 if (op0
== const0_rtx
)
14369 op0
= copy_to_mode_reg (mode0
, op0
);
14370 rawaddr
= gen_rtx_PLUS (Pmode
, op1
, op0
);
14372 addr
= gen_rtx_AND (Pmode
, rawaddr
, gen_rtx_CONST_INT (Pmode
, -16));
14373 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, addr
);
14375 /* For -maltivec=be, emit the load and follow it up with a
14376 permute to swap the elements. */
14377 if (!BYTES_BIG_ENDIAN
&& VECTOR_ELT_ORDER_BIG
)
14379 rtx temp
= gen_reg_rtx (tmode
);
14380 emit_insn (gen_rtx_SET (temp
, addr
));
14382 rtx sel
= swap_selector_for_mode (tmode
);
14383 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, temp
, temp
, sel
),
14385 emit_insn (gen_rtx_SET (target
, vperm
));
14388 emit_insn (gen_rtx_SET (target
, addr
));
14392 if (op0
== const0_rtx
)
14393 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, op1
);
14396 op0
= copy_to_mode_reg (mode0
, op0
);
14397 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
,
14398 gen_rtx_PLUS (Pmode
, op1
, op0
));
14401 pat
= GEN_FCN (icode
) (target
, addr
);
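/* Illustrative sketch, guarded out: the lvx/stvx paths above model the
   hardware's behaviour of ignoring the low four address bits by ANDing the
   effective address with -16 in the RTL.  The standalone helper below shows
   the same arithmetic on a plain integer address; the function name is
   hypothetical.  */
#if 0
#include <stdint.h>

/* Round ADDR down to the containing 16-byte (quadword) boundary,
   mirroring the gen_rtx_AND (Pmode, rawaddr, GEN_INT (-16)) above.  */
static uintptr_t
align_down_16 (uintptr_t addr)
{
  return addr & ~(uintptr_t) 15;	/* same bit pattern as addr & -16 */
}
#endif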
14411 altivec_expand_xl_be_builtin (enum insn_code icode
, tree exp
, rtx target
, bool blk
)
14414 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14415 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14416 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14417 machine_mode mode0
= Pmode
;
14418 machine_mode mode1
= Pmode
;
14419 rtx op0
= expand_normal (arg0
);
14420 rtx op1
= expand_normal (arg1
);
14422 if (icode
== CODE_FOR_nothing
)
14423 /* Builtin not supported on this processor. */
14426 /* If we got invalid arguments bail out before generating bad rtl. */
14427 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14431 || GET_MODE (target
) != tmode
14432 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14433 target
= gen_reg_rtx (tmode
);
14435 op1
= copy_to_mode_reg (mode1
, op1
);
14437 if (op0
== const0_rtx
)
14438 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, op1
);
14441 op0
= copy_to_mode_reg (mode0
, op0
);
14442 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
,
14443 gen_rtx_PLUS (Pmode
, op1
, op0
));
14446 pat
= GEN_FCN (icode
) (target
, addr
);
  /* Reverse the order of the elements if we are in little-endian mode.  */
14452 if (!VECTOR_ELT_ORDER_BIG
)
14454 rtx sel
= swap_selector_for_mode (tmode
);
14455 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, target
, target
, sel
),
14457 emit_insn (gen_rtx_SET (target
, vperm
));
14463 paired_expand_stv_builtin (enum insn_code icode
, tree exp
)
14465 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14466 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14467 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14468 rtx op0
= expand_normal (arg0
);
14469 rtx op1
= expand_normal (arg1
);
14470 rtx op2
= expand_normal (arg2
);
14472 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14473 machine_mode mode1
= Pmode
;
14474 machine_mode mode2
= Pmode
;
14476 /* Invalid arguments. Bail before doing anything stoopid! */
14477 if (arg0
== error_mark_node
14478 || arg1
== error_mark_node
14479 || arg2
== error_mark_node
)
14482 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, tmode
))
14483 op0
= copy_to_mode_reg (tmode
, op0
);
14485 op2
= copy_to_mode_reg (mode2
, op2
);
14487 if (op1
== const0_rtx
)
14489 addr
= gen_rtx_MEM (tmode
, op2
);
14493 op1
= copy_to_mode_reg (mode1
, op1
);
14494 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op1
, op2
));
14497 pat
= GEN_FCN (icode
) (addr
, op0
);
14504 altivec_expand_stxvl_builtin (enum insn_code icode
, tree exp
)
14507 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14508 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14509 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14510 rtx op0
= expand_normal (arg0
);
14511 rtx op1
= expand_normal (arg1
);
14512 rtx op2
= expand_normal (arg2
);
14513 machine_mode mode0
= insn_data
[icode
].operand
[0].mode
;
14514 machine_mode mode1
= insn_data
[icode
].operand
[1].mode
;
14515 machine_mode mode2
= insn_data
[icode
].operand
[2].mode
;
14517 if (icode
== CODE_FOR_nothing
)
14518 /* Builtin not supported on this processor. */
14521 /* If we got invalid arguments bail out before generating bad rtl. */
14522 if (arg0
== error_mark_node
14523 || arg1
== error_mark_node
14524 || arg2
== error_mark_node
)
14527 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14528 op0
= copy_to_mode_reg (mode0
, op0
);
14529 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14530 op1
= copy_to_mode_reg (mode1
, op1
);
14531 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
14532 op2
= copy_to_mode_reg (mode2
, op2
);
14534 pat
= GEN_FCN (icode
) (op0
, op1
, op2
);
14542 altivec_expand_stv_builtin (enum insn_code icode
, tree exp
)
14544 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14545 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14546 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14547 rtx op0
= expand_normal (arg0
);
14548 rtx op1
= expand_normal (arg1
);
14549 rtx op2
= expand_normal (arg2
);
14550 rtx pat
, addr
, rawaddr
;
14551 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14552 machine_mode smode
= insn_data
[icode
].operand
[1].mode
;
14553 machine_mode mode1
= Pmode
;
14554 machine_mode mode2
= Pmode
;
14556 /* Invalid arguments. Bail before doing anything stoopid! */
14557 if (arg0
== error_mark_node
14558 || arg1
== error_mark_node
14559 || arg2
== error_mark_node
)
14562 op2
= copy_to_mode_reg (mode2
, op2
);
14564 /* For STVX, express the RTL accurately by ANDing the address with -16.
14565 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14566 so the raw address is fine. */
14567 if (icode
== CODE_FOR_altivec_stvx_v2df_2op
14568 || icode
== CODE_FOR_altivec_stvx_v2di_2op
14569 || icode
== CODE_FOR_altivec_stvx_v4sf_2op
14570 || icode
== CODE_FOR_altivec_stvx_v4si_2op
14571 || icode
== CODE_FOR_altivec_stvx_v8hi_2op
14572 || icode
== CODE_FOR_altivec_stvx_v16qi_2op
)
14574 if (op1
== const0_rtx
)
14578 op1
= copy_to_mode_reg (mode1
, op1
);
14579 rawaddr
= gen_rtx_PLUS (Pmode
, op2
, op1
);
14582 addr
= gen_rtx_AND (Pmode
, rawaddr
, gen_rtx_CONST_INT (Pmode
, -16));
14583 addr
= gen_rtx_MEM (tmode
, addr
);
14585 op0
= copy_to_mode_reg (tmode
, op0
);
14587 /* For -maltivec=be, emit a permute to swap the elements, followed
14589 if (!BYTES_BIG_ENDIAN
&& VECTOR_ELT_ORDER_BIG
)
14591 rtx temp
= gen_reg_rtx (tmode
);
14592 rtx sel
= swap_selector_for_mode (tmode
);
14593 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, op0
, op0
, sel
),
14595 emit_insn (gen_rtx_SET (temp
, vperm
));
14596 emit_insn (gen_rtx_SET (addr
, temp
));
14599 emit_insn (gen_rtx_SET (addr
, op0
));
14603 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, smode
))
14604 op0
= copy_to_mode_reg (smode
, op0
);
14606 if (op1
== const0_rtx
)
14607 addr
= gen_rtx_MEM (tmode
, op2
);
14610 op1
= copy_to_mode_reg (mode1
, op1
);
14611 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op2
, op1
));
14614 pat
= GEN_FCN (icode
) (addr
, op0
);
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
14668 /* Expand the HTM builtin in EXP and store the result in TARGET.
14669 Store true in *EXPANDEDP if we found a builtin to expand. */
14671 htm_expand_builtin (tree exp
, rtx target
, bool * expandedp
)
14673 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
14674 bool nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
14675 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
14676 const struct builtin_description
*d
;
14681 if (!TARGET_POWERPC64
14682 && (fcode
== HTM_BUILTIN_TABORTDC
14683 || fcode
== HTM_BUILTIN_TABORTDCI
))
14685 size_t uns_fcode
= (size_t)fcode
;
14686 const char *name
= rs6000_builtin_info
[uns_fcode
].name
;
14687 error ("builtin %qs is only valid in 64-bit mode", name
);
14691 /* Expand the HTM builtins. */
14693 for (i
= 0; i
< ARRAY_SIZE (bdesc_htm
); i
++, d
++)
14694 if (d
->code
== fcode
)
14696 rtx op
[MAX_HTM_OPERANDS
], pat
;
14699 call_expr_arg_iterator iter
;
14700 unsigned attr
= rs6000_builtin_info
[fcode
].attr
;
14701 enum insn_code icode
= d
->icode
;
14702 const struct insn_operand_data
*insn_op
;
14703 bool uses_spr
= (attr
& RS6000_BTC_SPR
);
14707 icode
= rs6000_htm_spr_icode (nonvoid
);
14708 insn_op
= &insn_data
[icode
].operand
[0];
14712 machine_mode tmode
= (uses_spr
) ? insn_op
->mode
: E_SImode
;
14714 || GET_MODE (target
) != tmode
14715 || (uses_spr
&& !(*insn_op
->predicate
) (target
, tmode
)))
14716 target
= gen_reg_rtx (tmode
);
14718 op
[nopnds
++] = target
;
14721 FOR_EACH_CALL_EXPR_ARG (arg
, iter
, exp
)
14723 if (arg
== error_mark_node
|| nopnds
>= MAX_HTM_OPERANDS
)
14726 insn_op
= &insn_data
[icode
].operand
[nopnds
];
14728 op
[nopnds
] = expand_normal (arg
);
14730 if (!(*insn_op
->predicate
) (op
[nopnds
], insn_op
->mode
))
14732 if (!strcmp (insn_op
->constraint
, "n"))
14734 int arg_num
= (nonvoid
) ? nopnds
: nopnds
+ 1;
14735 if (!CONST_INT_P (op
[nopnds
]))
14736 error ("argument %d must be an unsigned literal", arg_num
);
14738 error ("argument %d is an unsigned literal that is "
14739 "out of range", arg_num
);
14742 op
[nopnds
] = copy_to_mode_reg (insn_op
->mode
, op
[nopnds
]);
14748 /* Handle the builtins for extended mnemonics. These accept
14749 no arguments, but map to builtins that take arguments. */
14752 case HTM_BUILTIN_TENDALL
: /* Alias for: tend. 1 */
14753 case HTM_BUILTIN_TRESUME
: /* Alias for: tsr. 1 */
14754 op
[nopnds
++] = GEN_INT (1);
14756 attr
|= RS6000_BTC_UNARY
;
14758 case HTM_BUILTIN_TSUSPEND
: /* Alias for: tsr. 0 */
14759 op
[nopnds
++] = GEN_INT (0);
14761 attr
|= RS6000_BTC_UNARY
;
14767 /* If this builtin accesses SPRs, then pass in the appropriate
14768 SPR number and SPR regno as the last two operands. */
14771 machine_mode mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
14772 op
[nopnds
++] = gen_rtx_CONST_INT (mode
, htm_spr_num (fcode
));
14773 op
[nopnds
++] = gen_rtx_REG (mode
, htm_spr_regno (fcode
));
14775 /* If this builtin accesses a CR, then pass in a scratch
14776 CR as the last operand. */
14777 else if (attr
& RS6000_BTC_CR
)
14778 { cr
= gen_reg_rtx (CCmode
);
14784 int expected_nopnds
= 0;
14785 if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_UNARY
)
14786 expected_nopnds
= 1;
14787 else if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_BINARY
)
14788 expected_nopnds
= 2;
14789 else if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_TERNARY
)
14790 expected_nopnds
= 3;
14791 if (!(attr
& RS6000_BTC_VOID
))
14792 expected_nopnds
+= 1;
14794 expected_nopnds
+= 2;
14796 gcc_assert (nopnds
== expected_nopnds
14797 && nopnds
<= MAX_HTM_OPERANDS
);
14803 pat
= GEN_FCN (icode
) (op
[0]);
14806 pat
= GEN_FCN (icode
) (op
[0], op
[1]);
14809 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2]);
14812 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2], op
[3]);
14815 gcc_unreachable ();
14821 if (attr
& RS6000_BTC_CR
)
14823 if (fcode
== HTM_BUILTIN_TBEGIN
)
14825 /* Emit code to set TARGET to true or false depending on
14826 whether the tbegin. instruction successfully or failed
14827 to start a transaction. We do this by placing the 1's
14828 complement of CR's EQ bit into TARGET. */
14829 rtx scratch
= gen_reg_rtx (SImode
);
14830 emit_insn (gen_rtx_SET (scratch
,
14831 gen_rtx_EQ (SImode
, cr
,
14833 emit_insn (gen_rtx_SET (target
,
14834 gen_rtx_XOR (SImode
, scratch
,
14839 /* Emit code to copy the 4-bit condition register field
14840 CR into the least significant end of register TARGET. */
14841 rtx scratch1
= gen_reg_rtx (SImode
);
14842 rtx scratch2
= gen_reg_rtx (SImode
);
14843 rtx subreg
= simplify_gen_subreg (CCmode
, scratch1
, SImode
, 0);
14844 emit_insn (gen_movcc (subreg
, cr
));
14845 emit_insn (gen_lshrsi3 (scratch2
, scratch1
, GEN_INT (28)));
14846 emit_insn (gen_andsi3 (target
, scratch2
, GEN_INT (0xf)));
14855 *expandedp
= false;
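/* Illustrative sketch, guarded out: the CR handling above either tests one
   bit (for tbegin. the target receives the one's complement of CR0's EQ bit)
   or moves a whole 4-bit CR field to the low end of a GPR with a shift and
   mask, exactly as the gen_lshrsi3/gen_andsi3 sequence does.  The helpers
   below show the same arithmetic on a 32-bit condition-register image; the
   names are hypothetical.  */
#if 0
#include <stdint.h>

/* Extract 4-bit condition field N (0 = CR0, leftmost) from a CR image.  */
static unsigned int
extract_cr_field (uint32_t cr, unsigned int n)
{
  return (cr >> (28 - 4 * n)) & 0xf;	/* shift right, keep four bits */
}

/* The value the expander above places in TARGET for tbegin.: the one's
   complement of CR0's EQ bit (bit 1 of the extracted field).  */
static int
tbegin_target_value (uint32_t cr)
{
  return ((extract_cr_field (cr, 0) >> 1) & 1) ^ 1;
}
#endif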
14859 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14862 cpu_expand_builtin (enum rs6000_builtins fcode
, tree exp ATTRIBUTE_UNUSED
,
14865 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14866 if (fcode
== RS6000_BUILTIN_CPU_INIT
)
14869 if (target
== 0 || GET_MODE (target
) != SImode
)
14870 target
= gen_reg_rtx (SImode
);
14872 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14873 tree arg
= TREE_OPERAND (CALL_EXPR_ARG (exp
, 0), 0);
14874 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
14875 to a STRING_CST. */
14876 if (TREE_CODE (arg
) == ARRAY_REF
14877 && TREE_CODE (TREE_OPERAND (arg
, 0)) == STRING_CST
14878 && TREE_CODE (TREE_OPERAND (arg
, 1)) == INTEGER_CST
14879 && compare_tree_int (TREE_OPERAND (arg
, 1), 0) == 0)
14880 arg
= TREE_OPERAND (arg
, 0);
14882 if (TREE_CODE (arg
) != STRING_CST
)
14884 error ("builtin %qs only accepts a string argument",
14885 rs6000_builtin_info
[(size_t) fcode
].name
);
14889 if (fcode
== RS6000_BUILTIN_CPU_IS
)
14891 const char *cpu
= TREE_STRING_POINTER (arg
);
14892 rtx cpuid
= NULL_RTX
;
14893 for (size_t i
= 0; i
< ARRAY_SIZE (cpu_is_info
); i
++)
14894 if (strcmp (cpu
, cpu_is_info
[i
].cpu
) == 0)
14896 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14897 cpuid
= GEN_INT (cpu_is_info
[i
].cpuid
+ _DL_FIRST_PLATFORM
);
14900 if (cpuid
== NULL_RTX
)
14902 /* Invalid CPU argument. */
14903 error ("cpu %qs is an invalid argument to builtin %qs",
14904 cpu
, rs6000_builtin_info
[(size_t) fcode
].name
);
14908 rtx platform
= gen_reg_rtx (SImode
);
14909 rtx tcbmem
= gen_const_mem (SImode
,
14910 gen_rtx_PLUS (Pmode
,
14911 gen_rtx_REG (Pmode
, TLS_REGNUM
),
14912 GEN_INT (TCB_PLATFORM_OFFSET
)));
14913 emit_move_insn (platform
, tcbmem
);
14914 emit_insn (gen_eqsi3 (target
, platform
, cpuid
));
14916 else if (fcode
== RS6000_BUILTIN_CPU_SUPPORTS
)
14918 const char *hwcap
= TREE_STRING_POINTER (arg
);
14919 rtx mask
= NULL_RTX
;
14921 for (size_t i
= 0; i
< ARRAY_SIZE (cpu_supports_info
); i
++)
14922 if (strcmp (hwcap
, cpu_supports_info
[i
].hwcap
) == 0)
14924 mask
= GEN_INT (cpu_supports_info
[i
].mask
);
14925 hwcap_offset
= TCB_HWCAP_OFFSET (cpu_supports_info
[i
].id
);
14928 if (mask
== NULL_RTX
)
14930 /* Invalid HWCAP argument. */
14931 error ("%s %qs is an invalid argument to builtin %qs",
14932 "hwcap", hwcap
, rs6000_builtin_info
[(size_t) fcode
].name
);
14936 rtx tcb_hwcap
= gen_reg_rtx (SImode
);
14937 rtx tcbmem
= gen_const_mem (SImode
,
14938 gen_rtx_PLUS (Pmode
,
14939 gen_rtx_REG (Pmode
, TLS_REGNUM
),
14940 GEN_INT (hwcap_offset
)));
14941 emit_move_insn (tcb_hwcap
, tcbmem
);
14942 rtx scratch1
= gen_reg_rtx (SImode
);
14943 emit_insn (gen_rtx_SET (scratch1
, gen_rtx_AND (SImode
, tcb_hwcap
, mask
)));
14944 rtx scratch2
= gen_reg_rtx (SImode
);
14945 emit_insn (gen_eqsi3 (scratch2
, scratch1
, const0_rtx
));
14946 emit_insn (gen_rtx_SET (target
, gen_rtx_XOR (SImode
, scratch2
, const1_rtx
)));
14949 gcc_unreachable ();
14951 /* Record that we have expanded a CPU builtin, so that we can later
14952 emit a reference to the special symbol exported by LIBC to ensure we
14953 do not link against an old LIBC that doesn't support this feature. */
14954 cpu_builtin_p
= true;
14957 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14958 "capability bits", rs6000_builtin_info
[(size_t) fcode
].name
);
14960 /* For old LIBCs, always return FALSE. */
14961 emit_move_insn (target
, GEN_INT (0));
14962 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
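/* Illustrative sketch, guarded out: at run time the code emitted above loads
   a word that glibc keeps in the thread control block and either compares it
   against a platform id (__builtin_cpu_is) or masks it against a feature bit
   (__builtin_cpu_supports).  The helpers below show the resulting tests on
   plain integers; the names, and the idea of passing the TCB words as
   arguments, are hypothetical -- the real code reads them at fixed offsets
   from the thread pointer register.  */
#if 0
#include <stdint.h>

/* __builtin_cpu_is: one equality test against the cached platform id.  */
static int
demo_cpu_is (uint32_t tcb_platform_word, uint32_t wanted_cpuid)
{
  return tcb_platform_word == wanted_cpuid;
}

/* __builtin_cpu_supports: test one feature bit in the cached HWCAP word.  */
static int
demo_cpu_supports (uint32_t tcb_hwcap_word, uint32_t feature_mask)
{
  return (tcb_hwcap_word & feature_mask) != 0;
}
#endif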
14968 rs6000_expand_ternop_builtin (enum insn_code icode
, tree exp
, rtx target
)
14971 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14972 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14973 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14974 rtx op0
= expand_normal (arg0
);
14975 rtx op1
= expand_normal (arg1
);
14976 rtx op2
= expand_normal (arg2
);
14977 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14978 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14979 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14980 machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
14982 if (icode
== CODE_FOR_nothing
)
14983 /* Builtin not supported on this processor. */
14986 /* If we got invalid arguments bail out before generating bad rtl. */
14987 if (arg0
== error_mark_node
14988 || arg1
== error_mark_node
14989 || arg2
== error_mark_node
)
14992 /* Check and prepare argument depending on the instruction code.
14994 Note that a switch statement instead of the sequence of tests
14995 would be incorrect as many of the CODE_FOR values could be
14996 CODE_FOR_nothing and that would yield multiple alternatives
14997 with identical values. We'd never reach here at runtime in
14999 if (icode
== CODE_FOR_altivec_vsldoi_v4sf
15000 || icode
== CODE_FOR_altivec_vsldoi_v2df
15001 || icode
== CODE_FOR_altivec_vsldoi_v4si
15002 || icode
== CODE_FOR_altivec_vsldoi_v8hi
15003 || icode
== CODE_FOR_altivec_vsldoi_v16qi
)
15005 /* Only allow 4-bit unsigned literals. */
15007 if (TREE_CODE (arg2
) != INTEGER_CST
15008 || TREE_INT_CST_LOW (arg2
) & ~0xf)
15010 error ("argument 3 must be a 4-bit unsigned literal");
15011 return CONST0_RTX (tmode
);
15014 else if (icode
== CODE_FOR_vsx_xxpermdi_v2df
15015 || icode
== CODE_FOR_vsx_xxpermdi_v2di
15016 || icode
== CODE_FOR_vsx_xxpermdi_v2df_be
15017 || icode
== CODE_FOR_vsx_xxpermdi_v2di_be
15018 || icode
== CODE_FOR_vsx_xxpermdi_v1ti
15019 || icode
== CODE_FOR_vsx_xxpermdi_v4sf
15020 || icode
== CODE_FOR_vsx_xxpermdi_v4si
15021 || icode
== CODE_FOR_vsx_xxpermdi_v8hi
15022 || icode
== CODE_FOR_vsx_xxpermdi_v16qi
15023 || icode
== CODE_FOR_vsx_xxsldwi_v16qi
15024 || icode
== CODE_FOR_vsx_xxsldwi_v8hi
15025 || icode
== CODE_FOR_vsx_xxsldwi_v4si
15026 || icode
== CODE_FOR_vsx_xxsldwi_v4sf
15027 || icode
== CODE_FOR_vsx_xxsldwi_v2di
15028 || icode
== CODE_FOR_vsx_xxsldwi_v2df
)
15030 /* Only allow 2-bit unsigned literals. */
15032 if (TREE_CODE (arg2
) != INTEGER_CST
15033 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15035 error ("argument 3 must be a 2-bit unsigned literal");
15036 return CONST0_RTX (tmode
);
15039 else if (icode
== CODE_FOR_vsx_set_v2df
15040 || icode
== CODE_FOR_vsx_set_v2di
15041 || icode
== CODE_FOR_bcdadd
15042 || icode
== CODE_FOR_bcdadd_lt
15043 || icode
== CODE_FOR_bcdadd_eq
15044 || icode
== CODE_FOR_bcdadd_gt
15045 || icode
== CODE_FOR_bcdsub
15046 || icode
== CODE_FOR_bcdsub_lt
15047 || icode
== CODE_FOR_bcdsub_eq
15048 || icode
== CODE_FOR_bcdsub_gt
)
15050 /* Only allow 1-bit unsigned literals. */
15052 if (TREE_CODE (arg2
) != INTEGER_CST
15053 || TREE_INT_CST_LOW (arg2
) & ~0x1)
15055 error ("argument 3 must be a 1-bit unsigned literal");
15056 return CONST0_RTX (tmode
);
  else if (icode == CODE_FOR_dfp_ddedpd_dd
	   || icode == CODE_FOR_dfp_ddedpd_td)
    {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument 1 must be 0 or 2");
	  return CONST0_RTX (tmode);
	}
    }
15071 else if (icode
== CODE_FOR_dfp_denbcd_dd
15072 || icode
== CODE_FOR_dfp_denbcd_td
)
15074 /* Only allow 1-bit unsigned literals. */
15076 if (TREE_CODE (arg0
) != INTEGER_CST
15077 || TREE_INT_CST_LOW (arg0
) & ~0x1)
15079 error ("argument 1 must be a 1-bit unsigned literal");
15080 return CONST0_RTX (tmode
);
15083 else if (icode
== CODE_FOR_dfp_dscli_dd
15084 || icode
== CODE_FOR_dfp_dscli_td
15085 || icode
== CODE_FOR_dfp_dscri_dd
15086 || icode
== CODE_FOR_dfp_dscri_td
)
15088 /* Only allow 6-bit unsigned literals. */
15090 if (TREE_CODE (arg1
) != INTEGER_CST
15091 || TREE_INT_CST_LOW (arg1
) & ~0x3f)
15093 error ("argument 2 must be a 6-bit unsigned literal");
15094 return CONST0_RTX (tmode
);
15097 else if (icode
== CODE_FOR_crypto_vshasigmaw
15098 || icode
== CODE_FOR_crypto_vshasigmad
)
15100 /* Check whether the 2nd and 3rd arguments are integer constants and in
15101 range and prepare arguments. */
15103 if (TREE_CODE (arg1
) != INTEGER_CST
|| wi::geu_p (wi::to_wide (arg1
), 2))
15105 error ("argument 2 must be 0 or 1");
15106 return CONST0_RTX (tmode
);
15110 if (TREE_CODE (arg2
) != INTEGER_CST
15111 || wi::geu_p (wi::to_wide (arg2
), 16))
15113 error ("argument 3 must be in the range 0..15");
15114 return CONST0_RTX (tmode
);
15119 || GET_MODE (target
) != tmode
15120 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15121 target
= gen_reg_rtx (tmode
);
15123 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
15124 op0
= copy_to_mode_reg (mode0
, op0
);
15125 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
15126 op1
= copy_to_mode_reg (mode1
, op1
);
15127 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
15128 op2
= copy_to_mode_reg (mode2
, op2
);
15130 if (TARGET_PAIRED_FLOAT
&& icode
== CODE_FOR_selv2sf4
)
15131 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
, CONST0_RTX (SFmode
));
15133 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
15141 /* Expand the lvx builtins. */
15143 altivec_expand_ld_builtin (tree exp
, rtx target
, bool *expandedp
)
15145 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15146 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
15148 machine_mode tmode
, mode0
;
15150 enum insn_code icode
;
15154 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi
:
15155 icode
= CODE_FOR_vector_altivec_load_v16qi
;
15157 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi
:
15158 icode
= CODE_FOR_vector_altivec_load_v8hi
;
15160 case ALTIVEC_BUILTIN_LD_INTERNAL_4si
:
15161 icode
= CODE_FOR_vector_altivec_load_v4si
;
15163 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf
:
15164 icode
= CODE_FOR_vector_altivec_load_v4sf
;
15166 case ALTIVEC_BUILTIN_LD_INTERNAL_2df
:
15167 icode
= CODE_FOR_vector_altivec_load_v2df
;
15169 case ALTIVEC_BUILTIN_LD_INTERNAL_2di
:
15170 icode
= CODE_FOR_vector_altivec_load_v2di
;
15172 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti
:
15173 icode
= CODE_FOR_vector_altivec_load_v1ti
;
15176 *expandedp
= false;
15182 arg0
= CALL_EXPR_ARG (exp
, 0);
15183 op0
= expand_normal (arg0
);
15184 tmode
= insn_data
[icode
].operand
[0].mode
;
15185 mode0
= insn_data
[icode
].operand
[1].mode
;
15188 || GET_MODE (target
) != tmode
15189 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15190 target
= gen_reg_rtx (tmode
);
15192 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
15193 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
15195 pat
= GEN_FCN (icode
) (target
, op0
);
15202 /* Expand the stvx builtins. */
15204 altivec_expand_st_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
15207 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15208 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
15210 machine_mode mode0
, mode1
;
15212 enum insn_code icode
;
15216 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi
:
15217 icode
= CODE_FOR_vector_altivec_store_v16qi
;
15219 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi
:
15220 icode
= CODE_FOR_vector_altivec_store_v8hi
;
15222 case ALTIVEC_BUILTIN_ST_INTERNAL_4si
:
15223 icode
= CODE_FOR_vector_altivec_store_v4si
;
15225 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf
:
15226 icode
= CODE_FOR_vector_altivec_store_v4sf
;
15228 case ALTIVEC_BUILTIN_ST_INTERNAL_2df
:
15229 icode
= CODE_FOR_vector_altivec_store_v2df
;
15231 case ALTIVEC_BUILTIN_ST_INTERNAL_2di
:
15232 icode
= CODE_FOR_vector_altivec_store_v2di
;
15234 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti
:
15235 icode
= CODE_FOR_vector_altivec_store_v1ti
;
15238 *expandedp
= false;
15242 arg0
= CALL_EXPR_ARG (exp
, 0);
15243 arg1
= CALL_EXPR_ARG (exp
, 1);
15244 op0
= expand_normal (arg0
);
15245 op1
= expand_normal (arg1
);
15246 mode0
= insn_data
[icode
].operand
[0].mode
;
15247 mode1
= insn_data
[icode
].operand
[1].mode
;
15249 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15250 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
15251 if (! (*insn_data
[icode
].operand
[1].predicate
) (op1
, mode1
))
15252 op1
= copy_to_mode_reg (mode1
, op1
);
15254 pat
= GEN_FCN (icode
) (op0
, op1
);
15262 /* Expand the dst builtins. */
15264 altivec_expand_dst_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
15267 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15268 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15269 tree arg0
, arg1
, arg2
;
15270 machine_mode mode0
, mode1
;
15271 rtx pat
, op0
, op1
, op2
;
15272 const struct builtin_description
*d
;
15275 *expandedp
= false;
15277 /* Handle DST variants. */
15279 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
15280 if (d
->code
== fcode
)
15282 arg0
= CALL_EXPR_ARG (exp
, 0);
15283 arg1
= CALL_EXPR_ARG (exp
, 1);
15284 arg2
= CALL_EXPR_ARG (exp
, 2);
15285 op0
= expand_normal (arg0
);
15286 op1
= expand_normal (arg1
);
15287 op2
= expand_normal (arg2
);
15288 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
15289 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
15291 /* Invalid arguments, bail out before generating bad rtl. */
15292 if (arg0
== error_mark_node
15293 || arg1
== error_mark_node
15294 || arg2
== error_mark_node
)
15299 if (TREE_CODE (arg2
) != INTEGER_CST
15300 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15302 error ("argument to %qs must be a 2-bit unsigned literal", d
->name
);
15306 if (! (*insn_data
[d
->icode
].operand
[0].predicate
) (op0
, mode0
))
15307 op0
= copy_to_mode_reg (Pmode
, op0
);
15308 if (! (*insn_data
[d
->icode
].operand
[1].predicate
) (op1
, mode1
))
15309 op1
= copy_to_mode_reg (mode1
, op1
);
15311 pat
= GEN_FCN (d
->icode
) (op0
, op1
, op2
);
15321 /* Expand vec_init builtin. */
15323 altivec_expand_vec_init_builtin (tree type
, tree exp
, rtx target
)
15325 machine_mode tmode
= TYPE_MODE (type
);
15326 machine_mode inner_mode
= GET_MODE_INNER (tmode
);
15327 int i
, n_elt
= GET_MODE_NUNITS (tmode
);
15329 gcc_assert (VECTOR_MODE_P (tmode
));
15330 gcc_assert (n_elt
== call_expr_nargs (exp
));
15332 if (!target
|| !register_operand (target
, tmode
))
15333 target
= gen_reg_rtx (tmode
);
  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
15337 if (n_elt
== 1 && GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (inner_mode
))
15339 rtx x
= expand_normal (CALL_EXPR_ARG (exp
, 0));
15340 emit_move_insn (target
, gen_lowpart (tmode
, x
));
15344 rtvec v
= rtvec_alloc (n_elt
);
15346 for (i
= 0; i
< n_elt
; ++i
)
15348 rtx x
= expand_normal (CALL_EXPR_ARG (exp
, i
));
15349 RTVEC_ELT (v
, i
) = gen_lowpart (inner_mode
, x
);
15352 rs6000_expand_vector_init (target
, gen_rtx_PARALLEL (tmode
, v
));
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */
static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
15408 /* Expand vec_ext builtin. */
15410 altivec_expand_vec_ext_builtin (tree exp
, rtx target
)
15412 machine_mode tmode
, mode0
;
15417 arg0
= CALL_EXPR_ARG (exp
, 0);
15418 arg1
= CALL_EXPR_ARG (exp
, 1);
15420 op0
= expand_normal (arg0
);
15421 op1
= expand_normal (arg1
);
15423 /* Call get_element_number to validate arg1 if it is a constant. */
15424 if (TREE_CODE (arg1
) == INTEGER_CST
)
15425 (void) get_element_number (TREE_TYPE (arg0
), arg1
);
15427 tmode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0
)));
15428 mode0
= TYPE_MODE (TREE_TYPE (arg0
));
15429 gcc_assert (VECTOR_MODE_P (mode0
));
15431 op0
= force_reg (mode0
, op0
);
15433 if (optimize
|| !target
|| !register_operand (target
, tmode
))
15434 target
= gen_reg_rtx (tmode
);
15436 rs6000_expand_vector_extract (target
, op0
, op1
);
15441 /* Expand the builtin in EXP and store the result in TARGET. Store
15442 true in *EXPANDEDP if we found a builtin to expand. */
15444 altivec_expand_builtin (tree exp
, rtx target
, bool *expandedp
)
15446 const struct builtin_description
*d
;
15448 enum insn_code icode
;
15449 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15450 tree arg0
, arg1
, arg2
;
15452 machine_mode tmode
, mode0
;
15453 enum rs6000_builtins fcode
15454 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15456 if (rs6000_overloaded_builtin_p (fcode
))
15459 error ("unresolved overload for Altivec builtin %qF", fndecl
);
15461 /* Given it is invalid, just generate a normal call. */
15462 return expand_call (exp
, target
, false);
15465 target
= altivec_expand_ld_builtin (exp
, target
, expandedp
);
15469 target
= altivec_expand_st_builtin (exp
, target
, expandedp
);
15473 target
= altivec_expand_dst_builtin (exp
, target
, expandedp
);
15481 case ALTIVEC_BUILTIN_STVX_V2DF
:
15482 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op
, exp
);
15483 case ALTIVEC_BUILTIN_STVX_V2DI
:
15484 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op
, exp
);
15485 case ALTIVEC_BUILTIN_STVX_V4SF
:
15486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op
, exp
);
15487 case ALTIVEC_BUILTIN_STVX
:
15488 case ALTIVEC_BUILTIN_STVX_V4SI
:
15489 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op
, exp
);
15490 case ALTIVEC_BUILTIN_STVX_V8HI
:
15491 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op
, exp
);
15492 case ALTIVEC_BUILTIN_STVX_V16QI
:
15493 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op
, exp
);
15494 case ALTIVEC_BUILTIN_STVEBX
:
15495 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx
, exp
);
15496 case ALTIVEC_BUILTIN_STVEHX
:
15497 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx
, exp
);
15498 case ALTIVEC_BUILTIN_STVEWX
:
15499 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx
, exp
);
15500 case ALTIVEC_BUILTIN_STVXL_V2DF
:
15501 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df
, exp
);
15502 case ALTIVEC_BUILTIN_STVXL_V2DI
:
15503 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di
, exp
);
15504 case ALTIVEC_BUILTIN_STVXL_V4SF
:
15505 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf
, exp
);
15506 case ALTIVEC_BUILTIN_STVXL
:
15507 case ALTIVEC_BUILTIN_STVXL_V4SI
:
15508 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si
, exp
);
15509 case ALTIVEC_BUILTIN_STVXL_V8HI
:
15510 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi
, exp
);
15511 case ALTIVEC_BUILTIN_STVXL_V16QI
:
15512 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi
, exp
);
15514 case ALTIVEC_BUILTIN_STVLX
:
15515 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx
, exp
);
15516 case ALTIVEC_BUILTIN_STVLXL
:
15517 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl
, exp
);
15518 case ALTIVEC_BUILTIN_STVRX
:
15519 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx
, exp
);
15520 case ALTIVEC_BUILTIN_STVRXL
:
15521 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl
, exp
);
15523 case P9V_BUILTIN_STXVL
:
15524 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl
, exp
);
15526 case P9V_BUILTIN_XST_LEN_R
:
15527 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r
, exp
);
15529 case VSX_BUILTIN_STXVD2X_V1TI
:
15530 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti
, exp
);
15531 case VSX_BUILTIN_STXVD2X_V2DF
:
15532 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df
, exp
);
15533 case VSX_BUILTIN_STXVD2X_V2DI
:
15534 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di
, exp
);
15535 case VSX_BUILTIN_STXVW4X_V4SF
:
15536 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf
, exp
);
15537 case VSX_BUILTIN_STXVW4X_V4SI
:
15538 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si
, exp
);
15539 case VSX_BUILTIN_STXVW4X_V8HI
:
15540 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi
, exp
);
15541 case VSX_BUILTIN_STXVW4X_V16QI
:
15542 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi
, exp
);
15544 /* For the following on big endian, it's ok to use any appropriate
15545 unaligned-supporting store, so use a generic expander. For
15546 little-endian, the exact element-reversing instruction must
15548 case VSX_BUILTIN_ST_ELEMREV_V2DF
:
15550 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2df
15551 : CODE_FOR_vsx_st_elemrev_v2df
);
15552 return altivec_expand_stv_builtin (code
, exp
);
15554 case VSX_BUILTIN_ST_ELEMREV_V2DI
:
15556 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2di
15557 : CODE_FOR_vsx_st_elemrev_v2di
);
15558 return altivec_expand_stv_builtin (code
, exp
);
15560 case VSX_BUILTIN_ST_ELEMREV_V4SF
:
15562 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4sf
15563 : CODE_FOR_vsx_st_elemrev_v4sf
);
15564 return altivec_expand_stv_builtin (code
, exp
);
15566 case VSX_BUILTIN_ST_ELEMREV_V4SI
:
15568 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4si
15569 : CODE_FOR_vsx_st_elemrev_v4si
);
15570 return altivec_expand_stv_builtin (code
, exp
);
15572 case VSX_BUILTIN_ST_ELEMREV_V8HI
:
15574 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v8hi
15575 : CODE_FOR_vsx_st_elemrev_v8hi
);
15576 return altivec_expand_stv_builtin (code
, exp
);
15578 case VSX_BUILTIN_ST_ELEMREV_V16QI
:
15580 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v16qi
15581 : CODE_FOR_vsx_st_elemrev_v16qi
);
15582 return altivec_expand_stv_builtin (code
, exp
);
15585 case ALTIVEC_BUILTIN_MFVSCR
:
15586 icode
= CODE_FOR_altivec_mfvscr
;
15587 tmode
= insn_data
[icode
].operand
[0].mode
;
15590 || GET_MODE (target
) != tmode
15591 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15592 target
= gen_reg_rtx (tmode
);
15594 pat
= GEN_FCN (icode
) (target
);
15600 case ALTIVEC_BUILTIN_MTVSCR
:
15601 icode
= CODE_FOR_altivec_mtvscr
;
15602 arg0
= CALL_EXPR_ARG (exp
, 0);
15603 op0
= expand_normal (arg0
);
15604 mode0
= insn_data
[icode
].operand
[0].mode
;
15606 /* If we got invalid arguments bail out before generating bad rtl. */
15607 if (arg0
== error_mark_node
)
15610 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15611 op0
= copy_to_mode_reg (mode0
, op0
);
15613 pat
= GEN_FCN (icode
) (op0
);
15618 case ALTIVEC_BUILTIN_DSSALL
:
15619 emit_insn (gen_altivec_dssall ());
15622 case ALTIVEC_BUILTIN_DSS
:
15623 icode
= CODE_FOR_altivec_dss
;
15624 arg0
= CALL_EXPR_ARG (exp
, 0);
15626 op0
= expand_normal (arg0
);
15627 mode0
= insn_data
[icode
].operand
[0].mode
;
15629 /* If we got invalid arguments bail out before generating bad rtl. */
15630 if (arg0
== error_mark_node
)
15633 if (TREE_CODE (arg0
) != INTEGER_CST
15634 || TREE_INT_CST_LOW (arg0
) & ~0x3)
15636 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15640 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15641 op0
= copy_to_mode_reg (mode0
, op0
);
15643 emit_insn (gen_altivec_dss (op0
));
15646 case ALTIVEC_BUILTIN_VEC_INIT_V4SI
:
15647 case ALTIVEC_BUILTIN_VEC_INIT_V8HI
:
15648 case ALTIVEC_BUILTIN_VEC_INIT_V16QI
:
15649 case ALTIVEC_BUILTIN_VEC_INIT_V4SF
:
15650 case VSX_BUILTIN_VEC_INIT_V2DF
:
15651 case VSX_BUILTIN_VEC_INIT_V2DI
:
15652 case VSX_BUILTIN_VEC_INIT_V1TI
:
15653 return altivec_expand_vec_init_builtin (TREE_TYPE (exp
), exp
, target
);
15655 case ALTIVEC_BUILTIN_VEC_SET_V4SI
:
15656 case ALTIVEC_BUILTIN_VEC_SET_V8HI
:
15657 case ALTIVEC_BUILTIN_VEC_SET_V16QI
:
15658 case ALTIVEC_BUILTIN_VEC_SET_V4SF
:
15659 case VSX_BUILTIN_VEC_SET_V2DF
:
15660 case VSX_BUILTIN_VEC_SET_V2DI
:
15661 case VSX_BUILTIN_VEC_SET_V1TI
:
15662 return altivec_expand_vec_set_builtin (exp
);
15664 case ALTIVEC_BUILTIN_VEC_EXT_V4SI
:
15665 case ALTIVEC_BUILTIN_VEC_EXT_V8HI
:
15666 case ALTIVEC_BUILTIN_VEC_EXT_V16QI
:
15667 case ALTIVEC_BUILTIN_VEC_EXT_V4SF
:
15668 case VSX_BUILTIN_VEC_EXT_V2DF
:
15669 case VSX_BUILTIN_VEC_EXT_V2DI
:
15670 case VSX_BUILTIN_VEC_EXT_V1TI
:
15671 return altivec_expand_vec_ext_builtin (exp
, target
);
15673 case P9V_BUILTIN_VEXTRACT4B
:
15674 case P9V_BUILTIN_VEC_VEXTRACT4B
:
15675 arg1
= CALL_EXPR_ARG (exp
, 1);
15678 /* Generate a normal call if it is invalid. */
15679 if (arg1
== error_mark_node
)
15680 return expand_call (exp
, target
, false);
15682 if (TREE_CODE (arg1
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg1
) > 12)
15684 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15685 return expand_call (exp
, target
, false);
15689 case P9V_BUILTIN_VINSERT4B
:
15690 case P9V_BUILTIN_VINSERT4B_DI
:
15691 case P9V_BUILTIN_VEC_VINSERT4B
:
15692 arg2
= CALL_EXPR_ARG (exp
, 2);
15695 /* Generate a normal call if it is invalid. */
15696 if (arg2
== error_mark_node
)
15697 return expand_call (exp
, target
, false);
15699 if (TREE_CODE (arg2
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2
) > 12)
15701 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15702 return expand_call (exp
, target
, false);
15708 /* Fall through. */
15711 /* Expand abs* operations. */
15713 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
15714 if (d
->code
== fcode
)
15715 return altivec_expand_abs_builtin (d
->icode
, exp
, target
);
15717 /* Expand the AltiVec predicates. */
15718 d
= bdesc_altivec_preds
;
15719 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
15720 if (d
->code
== fcode
)
15721 return altivec_expand_predicate_builtin (d
->icode
, exp
, target
);
15723 /* LV* are funky. We initialized them differently. */
15726 case ALTIVEC_BUILTIN_LVSL
:
15727 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl
,
15728 exp
, target
, false);
15729 case ALTIVEC_BUILTIN_LVSR
:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
    case ALTIVEC_BUILTIN_LVXL_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
    case ALTIVEC_BUILTIN_LVX_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    case VSX_BUILTIN_LXVD2X_V1TI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
					exp, target, false);
    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
			       : CODE_FOR_vsx_ld_elemrev_v2df);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
			       : CODE_FOR_vsx_ld_elemrev_v2di);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
			       : CODE_FOR_vsx_ld_elemrev_v4sf);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
			       : CODE_FOR_vsx_ld_elemrev_v4si);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
			       : CODE_FOR_vsx_ld_elemrev_v8hi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
			       : CODE_FOR_vsx_ld_elemrev_v16qi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
      /* Fall through.  */

      /* XL_BE  We initialized them to always load in big endian order.  */
    case VSX_BUILTIN_XL_BE_V2DI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v2di;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V4SI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v4si;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V8HI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v8hi;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V16QI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v16qi;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V2DF:
      {
	enum insn_code code = CODE_FOR_vsx_load_v2df;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V4SF:
      {
	enum insn_code code = CODE_FOR_vsx_load_v4sf;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
      /* Fall through.  */
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);

      /* Fall through.  */
    default:
      break;
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of %s must be a constant",
	     "__builtin_paired_predicate");
      return const0_rtx;
    }

  form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);

      emit_insn (gen_move_from_CR_ov_bit (target, scratch));

      error ("argument 1 of %qs is out of range",
	     "__builtin_paired_predicate");

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mpaired");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
	   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
	   == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-float", "-mlong-double-128");
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
    error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
	   name);
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else
    error ("builtin function %qs is not supported with the current options",
	   name);
}
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
		     tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn_code
	= (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
      switch (fn_code)
	{
	case RS6000_BUILTIN_NANQ:
	case RS6000_BUILTIN_NANSQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    const char *str = c_getstr (*args);
	    int quiet = fn_code == RS6000_BUILTIN_NANQ;
	    REAL_VALUE_TYPE real;

	    if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
	      return build_real (type, real);
	    break;
	  }
	case RS6000_BUILTIN_INFQ:
	case RS6000_BUILTIN_HUGE_VALQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    REAL_VALUE_TYPE inf;
	    real_inf (&inf);
	    return build_real (type, inf);
	  }
	default:
	  break;
	}
    }
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#endif

  return NULL_TREE;
}
/* Helper function to sort out which built-ins may be valid without having
   a LHS.  */
static bool
rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
{
  switch (fn_code)
    {
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return true;
    default:
      return false;
    }
}
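
/* Note: the cases above are all store flavors (stvx).  A store is performed
   purely for its side effect, so such a call is still worth folding even
   though it produces no value.  Roughly, something like

     __builtin_altivec_stvx_v4si (v, 0, p);

   has no LHS but should still reach the STVX folding further below.  */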
/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
   a constant, use rs6000_fold_builtin.)  */

static bool
rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
  enum rs6000_builtins fn_code
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, lhs;

  size_t uns_fncode = (size_t) fn_code;
  enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
  const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
  const char *fn_name2 = (icode != CODE_FOR_nothing)
			  ? get_insn_name ((int) icode)
			  : "nothing";

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
	     fn_code, fn_name1, fn_name2);

  if (!rs6000_fold_gimple)
    return false;

  /* Prevent gimple folding for code that does not have a LHS, unless it is
     allowed per the rs6000_builtin_valid_without_lhs helper function.  */
  if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
    return false;

  switch (fn_code)
    {
      /* Flavors of vec_add.  We deliberately don't expand
	 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
	 TImode, resulting in much poorer code generation.  */
    case ALTIVEC_BUILTIN_VADDUBM:
    case ALTIVEC_BUILTIN_VADDUHM:
    case ALTIVEC_BUILTIN_VADDUWM:
    case P8V_BUILTIN_VADDUDM:
    case ALTIVEC_BUILTIN_VADDFP:
    case VSX_BUILTIN_XVADDDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
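
    /* For instance, a call lhs = __builtin_altivec_vadduwm (a, b) is in
       effect rewritten to the ordinary GIMPLE statement

	 lhs = a + b;

       so later passes can treat it like any other vector addition.  */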
      /* Flavors of vec_sub.  We deliberately don't expand
	 P8V_BUILTIN_VSUBUQM.  */
    case ALTIVEC_BUILTIN_VSUBUBM:
    case ALTIVEC_BUILTIN_VSUBUHM:
    case ALTIVEC_BUILTIN_VSUBUWM:
    case P8V_BUILTIN_VSUBUDM:
    case ALTIVEC_BUILTIN_VSUBFP:
    case VSX_BUILTIN_XVSUBDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    case VSX_BUILTIN_XVMULSP:
    case VSX_BUILTIN_XVMULDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Even element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULESB:
    case ALTIVEC_BUILTIN_VMULESH:
    case ALTIVEC_BUILTIN_VMULESW:
      /* Even element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case ALTIVEC_BUILTIN_VMULEUW:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR,
					 arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Odd element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULOSB:
    case ALTIVEC_BUILTIN_VMULOSH:
    case ALTIVEC_BUILTIN_VMULOSW:
      /* Odd element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case ALTIVEC_BUILTIN_VMULOUW:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR,
					 arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_div (Integer).  */
    case VSX_BUILTIN_DIV_V2DI:
    case VSX_BUILTIN_UDIV_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
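
    /* Note the distinction drawn here: the V2DI integer divides above fold
       to TRUNC_DIV_EXPR (truncating integer division), while the float
       flavors that follow use RDIV_EXPR, the tree code for floating-point
       division.  */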
      /* Flavors of vec_div (Float).  */
    case VSX_BUILTIN_XVDIVSP:
    case VSX_BUILTIN_XVDIVDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_and.  */
    case ALTIVEC_BUILTIN_VAND:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_andc.  */
    case ALTIVEC_BUILTIN_VANDC:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
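
    /* Illustration: lhs = vec_andc (a, b) becomes the two statements

	 temp = ~b;
	 lhs = a & temp;

       the BIT_NOT_EXPR is inserted before the call, and the call itself is
       then replaced by the BIT_AND_EXPR.  */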
      /* Flavors of vec_nand.  */
    case P8V_BUILTIN_VEC_NAND:
    case P8V_BUILTIN_NAND_V16QI:
    case P8V_BUILTIN_NAND_V8HI:
    case P8V_BUILTIN_NAND_V4SI:
    case P8V_BUILTIN_NAND_V4SF:
    case P8V_BUILTIN_NAND_V2DF:
    case P8V_BUILTIN_NAND_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_or.  */
    case ALTIVEC_BUILTIN_VOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* flavors of vec_orc.  */
    case P8V_BUILTIN_ORC_V16QI:
    case P8V_BUILTIN_ORC_V8HI:
    case P8V_BUILTIN_ORC_V4SI:
    case P8V_BUILTIN_ORC_V4SF:
    case P8V_BUILTIN_ORC_V2DF:
    case P8V_BUILTIN_ORC_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_xor.  */
    case ALTIVEC_BUILTIN_VXOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_nor.  */
    case ALTIVEC_BUILTIN_VNOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	    && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	  return false;
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vector shift right algebraic.
	 vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
      /* Flavors of vector shift left.
	 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	    && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	  return false;
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
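
    /* In the vec_abs and vec_sl cases above, the fold is skipped for signed
       integral element types whose overflow is undefined, presumably so that
       replacing the builtin with ABS_EXPR/LSHIFT_EXPR cannot introduce
       undefined-overflow assumptions that the hardware instruction itself
       does not make.  */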
      /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple_seq stmts = NULL;
	/* Convert arg0 to unsigned.  */
	tree arg0_unsigned
	  = gimple_build (&stmts, VIEW_CONVERT_EXPR,
			  unsigned_type_for (TREE_TYPE (arg0)), arg0);
	tree res
	  = gimple_build (&stmts, RSHIFT_EXPR,
			  TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
	/* Convert result back to the lhs type.  */
	res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	update_call_from_tree (gsi, res);
	return true;
      }
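
    /* The detour through an unsigned type matters because RSHIFT_EXPR on a
       signed operand is an arithmetic shift; converting to unsigned first
       gives the logical shift that vsr{b,h,w,d} performs.  Roughly, for
       lhs = vec_sr (a, b):

	 a_u = VIEW_CONVERT_EXPR<unsigned vector>(a);
	 t   = a_u >> b;
	 lhs = VIEW_CONVERT_EXPR<typeof (lhs)>(t);  */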
      /* Vector loads.  */
    case ALTIVEC_BUILTIN_LVX_V16QI:
    case ALTIVEC_BUILTIN_LVX_V8HI:
    case ALTIVEC_BUILTIN_LVX_V4SI:
    case ALTIVEC_BUILTIN_LVX_V4SF:
    case ALTIVEC_BUILTIN_LVX_V2DI:
    case ALTIVEC_BUILTIN_LVX_V2DF:
      {
	arg0 = gimple_call_arg (stmt, 0);  // offset
	arg1 = gimple_call_arg (stmt, 1);  // address
	/* Do not fold for -maltivec=be on LE targets.  */
	if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
	  return false;
	lhs = gimple_call_lhs (stmt);
	location_t loc = gimple_location (stmt);
	/* Since arg1 may be cast to a different type, just use ptr_type_node
	   here instead of trying to enforce TBAA on pointer types.  */
	tree arg1_type = ptr_type_node;
	tree lhs_type = TREE_TYPE (lhs);
	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
	   the tree using the value from arg0.  The resulting type will match
	   the type of arg1.  */
	gimple_seq stmts = NULL;
	tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
	tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
				       arg1_type, arg1, temp_offset);
	/* Mask off any lower bits from the address.  */
	tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
					  arg1_type, temp_addr,
					  build_int_cst (arg1_type, -16));
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	/* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
	   take an offset, but since we've already incorporated the offset
	   above, here we just pass in a zero.  */
	gimple *g;
	g = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
					      build_int_cst (arg1_type, 0)));
	gimple_set_location (g, loc);
	gsi_replace (gsi, g, true);
	return true;
      }
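
    /* In effect, lhs = vec_ld (off, p) is folded to roughly

	 lhs = MEM[(p + off) & -16];

       mirroring the lvx instruction, which ignores the low-order four bits
       of its effective address.  */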
      /* Vector stores.  */
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      {
	/* Do not fold for -maltivec=be on LE targets.  */
	if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
	  return false;
	arg0 = gimple_call_arg (stmt, 0); /* Value to be stored.  */
	arg1 = gimple_call_arg (stmt, 1); /* Offset.  */
	tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address.  */
	location_t loc = gimple_location (stmt);
	tree arg0_type = TREE_TYPE (arg0);
	/* Use ptr_type_node (no TBAA) for the arg2_type.
	   FIXME: (Richard)  "A proper fix would be to transition this type as
	   seen from the frontend to GIMPLE, for example in a similar way we
	   do for MEM_REFs by piggy-backing that on an extra argument, a
	   constant zero pointer of the alias pointer type to use (which would
	   also serve as a type indicator of the store itself).  I'd use a
	   target specific internal function for this (not sure if we can have
	   those target specific, but I guess if it's folded away then that's
	   fine) and get away with the overload set."  */
	tree arg2_type = ptr_type_node;
	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
	   the tree using the value from arg0.  The resulting type will match
	   the type of arg2.  */
	gimple_seq stmts = NULL;
	tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
	tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
				       arg2_type, arg2, temp_offset);
	/* Mask off any lower bits from the address.  */
	tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
					  arg2_type, temp_addr,
					  build_int_cst (arg2_type, -16));
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	/* The desired gimple result should be similar to:
	   MEM[(__vector floatD.1407 *)_1] = vf1D.2697;  */
	gimple *g;
	g = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
					 build_int_cst (arg2_type, 0)), arg0);
	gimple_set_location (g, loc);
	gsi_replace (gsi, g, true);
	return true;
      }

    default:
      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
		 fn_code, fn_name1, fn_name2);
      break;
    }

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
			   ? get_insn_name ((int) icode)
			   : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int) icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
					   target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
					    ? CODE_FOR_rs6000_mftb_di
					    : CODE_FOR_rs6000_mftb_si),
					   target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
		     : (int) CODE_FOR_altivec_lvsl_direct);
	machine_mode tmode = insn_data[icode].operand[0].mode;
	machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }
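
  /* The rewrite above supplies a default scale argument: a one-argument
     call such as __builtin_altivec_vcfux (v) is turned into the equivalent
     two-argument form __builtin_altivec_vcfux (v, 0) before expansion.  */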
  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
	      || attr == RS6000_BTC_BINARY
	      || attr == RS6000_BTC_TERNARY
	      || attr == RS6000_BTC_SPECIAL);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (d->icode, target);

  gcc_unreachable ();
}
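
/* Dispatch order in rs6000_expand_builtin: a handful of builtins are
   special-cased in the switch above; everything else is handed to the
   Altivec, paired and HTM expanders and finally looked up by arity in the
   bdesc_0arg/1arg/2arg/3arg descriptor tables.  Falling off the end means
   the builtin appears in no table, hence the gcc_unreachable.  */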
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"  : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
				       : "__vector long long",
				       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
				       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
				       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
					intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
						 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
						unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
						unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
						? "__vector unsigned long"
						: "__vector unsigned long long",
						unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
						TYPE_QUAL_CONST));

  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
					   intTI_type_node, 1);
      unsigned_V1TI_type_node
	= rs6000_vector_type ("__vector unsigned __int128",
			      unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;

  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM double-double
     format that uses a pair of doubles, depending on the switches and
     defaults.

     If we don't support for either 128-bit IBM double double or IEEE 128-bit
     floating point, we need make sure the type is non-zero or else self-test
     fails during bootstrap.

     We don't register a built-in type for __ibm128 if the type is the same as
     long double.  Instead we add a #define for __ibm128 in
     rs6000_cpu_cpp_builtins to long double.

     For IEEE 128-bit floating point, always create the type __ieee128.  If the
     user used -mfloat128, rs6000-c.c will create a define from __float128 to
     __ieee128.  */
  if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
    {
      ibm128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ibm128_float_type_node, IFmode);
      layout_type (ibm128_float_type_node);

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
					      "__ibm128");
    }
  else
    ibm128_float_type_node = long_double_type_node;

  if (TARGET_FLOAT128_TYPE)
    {
      ieee128_float_type_node = float128_type_node;
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
					      "__ieee128");
    }
  else
    ieee128_float_type_node = long_double_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
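
  /* In this table, index [0] holds the signed (or only) type for a machine
     mode and index [1], where present, the unsigned variant;
     builtin_function_type consults it to translate machine modes back into
     tree types when constructing builtin signatures.  */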
  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
					     bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
					    bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
					    bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
					    ? "__vector __bool long"
					    : "__vector __bool long long",
					    bool_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
					     pixel_type_node, 8);

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = build_function_type_list (ieee128_float_type_node,
				    const_str_type_node, NULL_TREE);
  def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
  def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);

  ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
  def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
  def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
				      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
				      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
				    intSI_type_node, double_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);

  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF &&
      (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}

/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							    long_integer_type_node,
							    pcfloat_type_node,
							    NULL_TREE);

  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
		     d->name);
	  continue;
	}

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
17216 altivec_init_builtins (void)
17218 const struct builtin_description
*d
;
17222 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
17224 tree pvoid_type_node
= build_pointer_type (void_type_node
);
17226 tree pcvoid_type_node
17227 = build_pointer_type (build_qualified_type (void_type_node
,
17230 tree int_ftype_opaque
17231 = build_function_type_list (integer_type_node
,
17232 opaque_V4SI_type_node
, NULL_TREE
);
17233 tree opaque_ftype_opaque
17234 = build_function_type_list (integer_type_node
, NULL_TREE
);
17235 tree opaque_ftype_opaque_int
17236 = build_function_type_list (opaque_V4SI_type_node
,
17237 opaque_V4SI_type_node
, integer_type_node
, NULL_TREE
);
17238 tree opaque_ftype_opaque_opaque_int
17239 = build_function_type_list (opaque_V4SI_type_node
,
17240 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17241 integer_type_node
, NULL_TREE
);
17242 tree opaque_ftype_opaque_opaque_opaque
17243 = build_function_type_list (opaque_V4SI_type_node
,
17244 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17245 opaque_V4SI_type_node
, NULL_TREE
);
17246 tree opaque_ftype_opaque_opaque
17247 = build_function_type_list (opaque_V4SI_type_node
,
17248 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17250 tree int_ftype_int_opaque_opaque
17251 = build_function_type_list (integer_type_node
,
17252 integer_type_node
, opaque_V4SI_type_node
,
17253 opaque_V4SI_type_node
, NULL_TREE
);
17254 tree int_ftype_int_v4si_v4si
17255 = build_function_type_list (integer_type_node
,
17256 integer_type_node
, V4SI_type_node
,
17257 V4SI_type_node
, NULL_TREE
);
17258 tree int_ftype_int_v2di_v2di
17259 = build_function_type_list (integer_type_node
,
17260 integer_type_node
, V2DI_type_node
,
17261 V2DI_type_node
, NULL_TREE
);
17262 tree void_ftype_v4si
17263 = build_function_type_list (void_type_node
, V4SI_type_node
, NULL_TREE
);
17264 tree v8hi_ftype_void
17265 = build_function_type_list (V8HI_type_node
, NULL_TREE
);
17266 tree void_ftype_void
17267 = build_function_type_list (void_type_node
, NULL_TREE
);
17268 tree void_ftype_int
17269 = build_function_type_list (void_type_node
, integer_type_node
, NULL_TREE
);
17271 tree opaque_ftype_long_pcvoid
17272 = build_function_type_list (opaque_V4SI_type_node
,
17273 long_integer_type_node
, pcvoid_type_node
,
17275 tree v16qi_ftype_long_pcvoid
17276 = build_function_type_list (V16QI_type_node
,
17277 long_integer_type_node
, pcvoid_type_node
,
17279 tree v8hi_ftype_long_pcvoid
17280 = build_function_type_list (V8HI_type_node
,
17281 long_integer_type_node
, pcvoid_type_node
,
17283 tree v4si_ftype_long_pcvoid
17284 = build_function_type_list (V4SI_type_node
,
17285 long_integer_type_node
, pcvoid_type_node
,
17287 tree v4sf_ftype_long_pcvoid
17288 = build_function_type_list (V4SF_type_node
,
17289 long_integer_type_node
, pcvoid_type_node
,
17291 tree v2df_ftype_long_pcvoid
17292 = build_function_type_list (V2DF_type_node
,
17293 long_integer_type_node
, pcvoid_type_node
,
17295 tree v2di_ftype_long_pcvoid
17296 = build_function_type_list (V2DI_type_node
,
17297 long_integer_type_node
, pcvoid_type_node
,
17300 tree void_ftype_opaque_long_pvoid
17301 = build_function_type_list (void_type_node
,
17302 opaque_V4SI_type_node
, long_integer_type_node
,
17303 pvoid_type_node
, NULL_TREE
);
17304 tree void_ftype_v4si_long_pvoid
17305 = build_function_type_list (void_type_node
,
17306 V4SI_type_node
, long_integer_type_node
,
17307 pvoid_type_node
, NULL_TREE
);
17308 tree void_ftype_v16qi_long_pvoid
17309 = build_function_type_list (void_type_node
,
17310 V16QI_type_node
, long_integer_type_node
,
17311 pvoid_type_node
, NULL_TREE
);
17313 tree void_ftype_v16qi_pvoid_long
17314 = build_function_type_list (void_type_node
,
17315 V16QI_type_node
, pvoid_type_node
,
17316 long_integer_type_node
, NULL_TREE
);
17318 tree void_ftype_v8hi_long_pvoid
17319 = build_function_type_list (void_type_node
,
17320 V8HI_type_node
, long_integer_type_node
,
17321 pvoid_type_node
, NULL_TREE
);
17322 tree void_ftype_v4sf_long_pvoid
17323 = build_function_type_list (void_type_node
,
17324 V4SF_type_node
, long_integer_type_node
,
17325 pvoid_type_node
, NULL_TREE
);
17326 tree void_ftype_v2df_long_pvoid
17327 = build_function_type_list (void_type_node
,
17328 V2DF_type_node
, long_integer_type_node
,
17329 pvoid_type_node
, NULL_TREE
);
17330 tree void_ftype_v2di_long_pvoid
17331 = build_function_type_list (void_type_node
,
17332 V2DI_type_node
, long_integer_type_node
,
17333 pvoid_type_node
, NULL_TREE
);
17334 tree int_ftype_int_v8hi_v8hi
17335 = build_function_type_list (integer_type_node
,
17336 integer_type_node
, V8HI_type_node
,
17337 V8HI_type_node
, NULL_TREE
);
17338 tree int_ftype_int_v16qi_v16qi
17339 = build_function_type_list (integer_type_node
,
17340 integer_type_node
, V16QI_type_node
,
17341 V16QI_type_node
, NULL_TREE
);
17342 tree int_ftype_int_v4sf_v4sf
17343 = build_function_type_list (integer_type_node
,
17344 integer_type_node
, V4SF_type_node
,
17345 V4SF_type_node
, NULL_TREE
);
17346 tree int_ftype_int_v2df_v2df
17347 = build_function_type_list (integer_type_node
,
17348 integer_type_node
, V2DF_type_node
,
17349 V2DF_type_node
, NULL_TREE
);
17350 tree v2di_ftype_v2di
17351 = build_function_type_list (V2DI_type_node
, V2DI_type_node
, NULL_TREE
);
17352 tree v4si_ftype_v4si
17353 = build_function_type_list (V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
17354 tree v8hi_ftype_v8hi
17355 = build_function_type_list (V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
17356 tree v16qi_ftype_v16qi
17357 = build_function_type_list (V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
17358 tree v4sf_ftype_v4sf
17359 = build_function_type_list (V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
17360 tree v2df_ftype_v2df
17361 = build_function_type_list (V2DF_type_node
, V2DF_type_node
, NULL_TREE
);
17362 tree void_ftype_pcvoid_int_int
17363 = build_function_type_list (void_type_node
,
17364 pcvoid_type_node
, integer_type_node
,
17365 integer_type_node
, NULL_TREE
);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V2DF);
  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V2DI);
  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V4SF);
  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V4SI);
  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V8HI);
  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V16QI);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V2DF);
  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V2DI);
  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V4SF);
  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V4SI);
  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V8HI);
  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V16QI);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V2DF);
  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V2DI);
  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V4SF);
  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V4SI);
  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V8HI);
  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V16QI);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V2DF);
  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V2DI);
  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V4SF);
  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V4SI);
  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V8HI);
  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V16QI);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V16QI);

  def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V4SI);

  def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V8HI);
  def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V4SI);
  def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V2DI);
  def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V4SF);
  def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V2DF);
  def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V16QI);
  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
                   VSX_BUILTIN_LD_ELEMREV_V8HI);
      def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
                   VSX_BUILTIN_LD_ELEMREV_V16QI);
      def_builtin ("__builtin_vsx_st_elemrev_v8hi",
                   void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
      def_builtin ("__builtin_vsx_st_elemrev_v16qi",
                   void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
    }
  else
    {
      rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
        = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
        = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
        = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
        = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
    }
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_XL);
  def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_XL_BE);
  def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_XST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_ADDE);
  def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_ADDEC);
  def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_CMPNE);
  def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_MUL);
  def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_SUBE);
  def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_SUBEC);
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
                   P9V_BUILTIN_STXVL);
      def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
                   P9V_BUILTIN_XST_LEN_R);
    }
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    {
      HOST_WIDE_INT mask = d->mask;

      /* It is expected that these dst built-in functions may have
         d->icode equal to CODE_FOR_nothing.  */
      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
                     d->name);
          continue;
        }

      def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
    }
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      machine_mode mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
                     d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        mode1 = VOIDmode;
      else
        {
          /* Cannot define builtin if the instruction is disabled.  */
          gcc_assert (d->icode != CODE_FOR_nothing);
          mode1 = insn_data[d->icode].operand[1].mode;
        }

      switch (mode1)
        {
        case E_VOIDmode:
          type = int_ftype_int_opaque_opaque;
          break;
        case E_V2DImode:
          type = int_ftype_int_v2di_v2di;
          break;
        case E_V4SImode:
          type = int_ftype_int_v4si_v4si;
          break;
        case E_V8HImode:
          type = int_ftype_int_v8hi_v8hi;
          break;
        case E_V16QImode:
          type = int_ftype_int_v16qi_v16qi;
          break;
        case E_V4SFmode:
          type = int_ftype_int_v4sf_v4sf;
          break;
        case E_V2DFmode:
          type = int_ftype_int_v2df_v2df;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
                     d->name);
          continue;
        }

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);
      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
        {
        case E_V2DImode:
          type = v2di_ftype_v2di;
          break;
        case E_V4SImode:
          type = v4si_ftype_v4si;
          break;
        case E_V8HImode:
          type = v8hi_ftype_v8hi;
          break;
        case E_V16QImode:
          type = v16qi_ftype_v16qi;
          break;
        case E_V4SFmode:
          type = v4sf_ftype_v4sf;
          break;
        case E_V2DFmode:
          type = v2df_ftype_v2df;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
                               v16qi_ftype_long_pcvoid,
                               ALTIVEC_BUILTIN_MASK_FOR_LOAD,
                               BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
                                    integer_type_node, integer_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
               ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
                                    float_type_node, float_type_node,
                                    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
                                    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
                                    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
                                    intSI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
                                    intHI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
                                    intQI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
                                    float_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
                                    double_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
                                    intDI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
  if (V1TI_type_node)
    {
      tree v1ti_ftype_long_pcvoid
        = build_function_type_list (V1TI_type_node,
                                    long_integer_type_node, pcvoid_type_node,
                                    NULL_TREE);
      tree void_ftype_v1ti_long_pvoid
        = build_function_type_list (void_type_node,
                                    V1TI_type_node, long_integer_type_node,
                                    pvoid_type_node, NULL_TREE);
      def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
                   VSX_BUILTIN_LXVD2X_V1TI);
      def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
                   VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
                                        NULL_TREE, NULL_TREE);
      def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
      ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
                                        intTI_type_node,
                                        integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
      ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
                                        integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
    }
}
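
/* Illustrative sketch only (not part of the compiler, not compiled): roughly
   how a couple of the builtins registered above surface in user code once the
   vec_* intrinsics in <altivec.h> map onto them.  The vector typedef spelling
   and the example function name are assumptions made for the illustration.  */
#if 0
__vector signed int
example_init_and_load (const void *p)
{
  /* Matches v4si_ftype_long_pcvoid registered for ALTIVEC_BUILTIN_LVX.  */
  __vector signed int v = __builtin_altivec_lvx (0L, p);
  /* Matches the four-int signature registered for VEC_INIT_V4SI.  */
  __vector signed int w = __builtin_vec_init_v4si (1, 2, 3, 4);
  return v + w;
}
#endif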
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree gpr_type_node;
      tree rettype;
      tree argtype;

      /* It is expected that these htm built-in functions may have
         d->icode equal to CODE_FOR_nothing.  */

      if (TARGET_32BIT && TARGET_POWERPC64)
        gpr_type_node = long_long_unsigned_type_node;
      else
        gpr_type_node = long_unsigned_type_node;

      if (attr & RS6000_BTC_SPR)
        {
          rettype = gpr_type_node;
          argtype = gpr_type_node;
        }
      else if (d->code == HTM_BUILTIN_TABORTDC
               || d->code == HTM_BUILTIN_TABORTDCI)
        {
          rettype = unsigned_type_node;
          argtype = gpr_type_node;
        }
      else
        {
          rettype = unsigned_type_node;
          argtype = unsigned_type_node;
        }

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
          continue;
        }

      if (d->name == 0)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
                     (long unsigned) i);
          continue;
        }

      op[nopnds++] = (void_func) ? void_type_node : rettype;

      if (attr_args == RS6000_BTC_UNARY)
        op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }
      else if (attr_args == RS6000_BTC_TERNARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }

      switch (nopnds)
        {
        case 1:
          type = build_function_type_list (op[0], NULL_TREE);
          break;
        case 2:
          type = build_function_type_list (op[0], op[1], NULL_TREE);
          break;
        case 3:
          type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
          break;
        case 4:
          type = build_function_type_list (op[0], op[1], op[2], op[3],
                                           NULL_TREE);
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
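
/* Illustration only (not compiled): the bdesc_htm loop above is what exposes
   the hardware transactional memory builtins to user code compiled with
   -mhtm, e.g. __builtin_tbegin/__builtin_tend.  The sketch below is a usage
   reminder under that assumption, not part of this file's logic.  */
#if 0
int
example_transaction (int *counter)
{
  if (__builtin_tbegin (0))
    {
      (*counter)++;
      __builtin_tend (0);
      return 1;
    }
  return 0;   /* transaction did not start or was aborted */
}
#endif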
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}

/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
          && (p1->mode[1] == p2->mode[1])
          && (p1->mode[2] == p2->mode[2])
          && (p1->mode[3] == p2->mode[3])
          && (p1->uns_p[0] == p2->uns_p[0])
          && (p1->uns_p[1] == p2->uns_p[1])
          && (p1->uns_p[2] == p2->uns_p[2])
          && (p1->uns_p[3] == p2->uns_p[3]));
}
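
/* A minimal standalone model (illustration only, never compiled) of the hash
   mixing above: fold four (mode, signedness) pairs into one value the same
   way builtin_hasher::hash does.  NUM_MODES stands in for MAX_MACHINE_MODE;
   the function name and parameter layout are assumptions for the example.  */
#if 0
static unsigned
example_builtin_hash (const unsigned char mode[4], const unsigned char uns_p[4])
{
  const unsigned NUM_MODES = 256;       /* stand-in for MAX_MACHINE_MODE */
  unsigned ret = 0;
  for (int i = 0; i < 4; i++)
    {
      ret = ret * NUM_MODES + mode[i];  /* mix in the machine mode */
      ret = ret * 2 + uns_p[i];         /* then the signedness flag */
    }
  return ret;
}
#endif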
17982 /* Map types for builtin functions with an explicit return type and up to 3
17983 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17984 of the argument. */
17986 builtin_function_type (machine_mode mode_ret
, machine_mode mode_arg0
,
17987 machine_mode mode_arg1
, machine_mode mode_arg2
,
17988 enum rs6000_builtins builtin
, const char *name
)
17990 struct builtin_hash_struct h
;
17991 struct builtin_hash_struct
*h2
;
17994 tree ret_type
= NULL_TREE
;
17995 tree arg_type
[3] = { NULL_TREE
, NULL_TREE
, NULL_TREE
};
17997 /* Create builtin_hash_table. */
17998 if (builtin_hash_table
== NULL
)
17999 builtin_hash_table
= hash_table
<builtin_hasher
>::create_ggc (1500);
18001 h
.type
= NULL_TREE
;
18002 h
.mode
[0] = mode_ret
;
18003 h
.mode
[1] = mode_arg0
;
18004 h
.mode
[2] = mode_arg1
;
18005 h
.mode
[3] = mode_arg2
;
18011 /* If the builtin is a type that produces unsigned results or takes unsigned
18012 arguments, and it is returned as a decl for the vectorizer (such as
18013 widening multiplies, permute), make sure the arguments and return value
18014 are type correct. */
18017 /* unsigned 1 argument functions. */
18018 case CRYPTO_BUILTIN_VSBOX
:
18019 case P8V_BUILTIN_VGBBD
:
18020 case MISC_BUILTIN_CDTBCD
:
18021 case MISC_BUILTIN_CBCDTD
:
18026 /* unsigned 2 argument functions. */
18027 case ALTIVEC_BUILTIN_VMULEUB
:
18028 case ALTIVEC_BUILTIN_VMULEUH
:
18029 case ALTIVEC_BUILTIN_VMULEUW
:
18030 case ALTIVEC_BUILTIN_VMULOUB
:
18031 case ALTIVEC_BUILTIN_VMULOUH
:
18032 case ALTIVEC_BUILTIN_VMULOUW
:
18033 case CRYPTO_BUILTIN_VCIPHER
:
18034 case CRYPTO_BUILTIN_VCIPHERLAST
:
18035 case CRYPTO_BUILTIN_VNCIPHER
:
18036 case CRYPTO_BUILTIN_VNCIPHERLAST
:
18037 case CRYPTO_BUILTIN_VPMSUMB
:
18038 case CRYPTO_BUILTIN_VPMSUMH
:
18039 case CRYPTO_BUILTIN_VPMSUMW
:
18040 case CRYPTO_BUILTIN_VPMSUMD
:
18041 case CRYPTO_BUILTIN_VPMSUM
:
18042 case MISC_BUILTIN_ADDG6S
:
18043 case MISC_BUILTIN_DIVWEU
:
18044 case MISC_BUILTIN_DIVWEUO
:
18045 case MISC_BUILTIN_DIVDEU
:
18046 case MISC_BUILTIN_DIVDEUO
:
18047 case VSX_BUILTIN_UDIV_V2DI
:
18048 case ALTIVEC_BUILTIN_VMAXUB
:
18049 case ALTIVEC_BUILTIN_VMINUB
:
18050 case ALTIVEC_BUILTIN_VMAXUH
:
18051 case ALTIVEC_BUILTIN_VMINUH
:
18052 case ALTIVEC_BUILTIN_VMAXUW
:
18053 case ALTIVEC_BUILTIN_VMINUW
:
18054 case P8V_BUILTIN_VMAXUD
:
18055 case P8V_BUILTIN_VMINUD
:
18061 /* unsigned 3 argument functions. */
18062 case ALTIVEC_BUILTIN_VPERM_16QI_UNS
:
18063 case ALTIVEC_BUILTIN_VPERM_8HI_UNS
:
18064 case ALTIVEC_BUILTIN_VPERM_4SI_UNS
:
18065 case ALTIVEC_BUILTIN_VPERM_2DI_UNS
:
18066 case ALTIVEC_BUILTIN_VSEL_16QI_UNS
:
18067 case ALTIVEC_BUILTIN_VSEL_8HI_UNS
:
18068 case ALTIVEC_BUILTIN_VSEL_4SI_UNS
:
18069 case ALTIVEC_BUILTIN_VSEL_2DI_UNS
:
18070 case VSX_BUILTIN_VPERM_16QI_UNS
:
18071 case VSX_BUILTIN_VPERM_8HI_UNS
:
18072 case VSX_BUILTIN_VPERM_4SI_UNS
:
18073 case VSX_BUILTIN_VPERM_2DI_UNS
:
18074 case VSX_BUILTIN_XXSEL_16QI_UNS
:
18075 case VSX_BUILTIN_XXSEL_8HI_UNS
:
18076 case VSX_BUILTIN_XXSEL_4SI_UNS
:
18077 case VSX_BUILTIN_XXSEL_2DI_UNS
:
18078 case CRYPTO_BUILTIN_VPERMXOR
:
18079 case CRYPTO_BUILTIN_VPERMXOR_V2DI
:
18080 case CRYPTO_BUILTIN_VPERMXOR_V4SI
:
18081 case CRYPTO_BUILTIN_VPERMXOR_V8HI
:
18082 case CRYPTO_BUILTIN_VPERMXOR_V16QI
:
18083 case CRYPTO_BUILTIN_VSHASIGMAW
:
18084 case CRYPTO_BUILTIN_VSHASIGMAD
:
18085 case CRYPTO_BUILTIN_VSHASIGMA
:
18092 /* signed permute functions with unsigned char mask. */
18093 case ALTIVEC_BUILTIN_VPERM_16QI
:
18094 case ALTIVEC_BUILTIN_VPERM_8HI
:
18095 case ALTIVEC_BUILTIN_VPERM_4SI
:
18096 case ALTIVEC_BUILTIN_VPERM_4SF
:
18097 case ALTIVEC_BUILTIN_VPERM_2DI
:
18098 case ALTIVEC_BUILTIN_VPERM_2DF
:
18099 case VSX_BUILTIN_VPERM_16QI
:
18100 case VSX_BUILTIN_VPERM_8HI
:
18101 case VSX_BUILTIN_VPERM_4SI
:
18102 case VSX_BUILTIN_VPERM_4SF
:
18103 case VSX_BUILTIN_VPERM_2DI
:
18104 case VSX_BUILTIN_VPERM_2DF
:
18108 /* unsigned args, signed return. */
18109 case VSX_BUILTIN_XVCVUXDSP
:
18110 case VSX_BUILTIN_XVCVUXDDP_UNS
:
18111 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF
:
18115 /* signed args, unsigned return. */
18116 case VSX_BUILTIN_XVCVDPUXDS_UNS
:
18117 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI
:
18118 case MISC_BUILTIN_UNPACK_TD
:
18119 case MISC_BUILTIN_UNPACK_V1TI
:
18123 /* unsigned arguments for 128-bit pack instructions. */
18124 case MISC_BUILTIN_PACK_TD
:
18125 case MISC_BUILTIN_PACK_V1TI
:
18130 /* unsigned second arguments (vector shift right). */
18131 case ALTIVEC_BUILTIN_VSRB
:
18132 case ALTIVEC_BUILTIN_VSRH
:
18133 case ALTIVEC_BUILTIN_VSRW
:
18134 case P8V_BUILTIN_VSRD
:
18142 /* Figure out how many args are present. */
18143 while (num_args
> 0 && h
.mode
[num_args
] == VOIDmode
)
18146 ret_type
= builtin_mode_to_type
[h
.mode
[0]][h
.uns_p
[0]];
18147 if (!ret_type
&& h
.uns_p
[0])
18148 ret_type
= builtin_mode_to_type
[h
.mode
[0]][0];
18151 fatal_error (input_location
,
18152 "internal error: builtin function %qs had an unexpected "
18153 "return type %qs", name
, GET_MODE_NAME (h
.mode
[0]));
18155 for (i
= 0; i
< (int) ARRAY_SIZE (arg_type
); i
++)
18156 arg_type
[i
] = NULL_TREE
;
18158 for (i
= 0; i
< num_args
; i
++)
18160 int m
= (int) h
.mode
[i
+1];
18161 int uns_p
= h
.uns_p
[i
+1];
18163 arg_type
[i
] = builtin_mode_to_type
[m
][uns_p
];
18164 if (!arg_type
[i
] && uns_p
)
18165 arg_type
[i
] = builtin_mode_to_type
[m
][0];
18168 fatal_error (input_location
,
18169 "internal error: builtin function %qs, argument %d "
18170 "had unexpected argument type %qs", name
, i
,
18171 GET_MODE_NAME (m
));
18174 builtin_hash_struct
**found
= builtin_hash_table
->find_slot (&h
, INSERT
);
18175 if (*found
== NULL
)
18177 h2
= ggc_alloc
<builtin_hash_struct
> ();
18181 h2
->type
= build_function_type_list (ret_type
, arg_type
[0], arg_type
[1],
18182 arg_type
[2], NULL_TREE
);
18185 return (*found
)->type
;
18189 rs6000_common_init_builtins (void)
18191 const struct builtin_description
*d
;
18194 tree opaque_ftype_opaque
= NULL_TREE
;
18195 tree opaque_ftype_opaque_opaque
= NULL_TREE
;
18196 tree opaque_ftype_opaque_opaque_opaque
= NULL_TREE
;
18197 tree v2si_ftype
= NULL_TREE
;
18198 tree v2si_ftype_qi
= NULL_TREE
;
18199 tree v2si_ftype_v2si_qi
= NULL_TREE
;
18200 tree v2si_ftype_int_qi
= NULL_TREE
;
18201 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
18203 if (!TARGET_PAIRED_FLOAT
)
18205 builtin_mode_to_type
[V2SImode
][0] = opaque_V2SI_type_node
;
18206 builtin_mode_to_type
[V2SFmode
][0] = opaque_V2SF_type_node
;
  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
18215 if (TARGET_EXTRA_BUILTINS
)
18216 builtin_mask
|= RS6000_BTM_COMMON
;
18218 /* Add the ternary operators. */
18220 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
18223 HOST_WIDE_INT mask
= d
->mask
;
18225 if ((mask
& builtin_mask
) != mask
)
18227 if (TARGET_DEBUG_BUILTIN
)
18228 fprintf (stderr
, "rs6000_builtin, skip ternary %s\n", d
->name
);
18232 if (rs6000_overloaded_builtin_p (d
->code
))
18234 if (! (type
= opaque_ftype_opaque_opaque_opaque
))
18235 type
= opaque_ftype_opaque_opaque_opaque
18236 = build_function_type_list (opaque_V4SI_type_node
,
18237 opaque_V4SI_type_node
,
18238 opaque_V4SI_type_node
,
18239 opaque_V4SI_type_node
,
18244 enum insn_code icode
= d
->icode
;
18247 if (TARGET_DEBUG_BUILTIN
)
18248 fprintf (stderr
, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18254 if (icode
== CODE_FOR_nothing
)
18256 if (TARGET_DEBUG_BUILTIN
)
18257 fprintf (stderr
, "rs6000_builtin, skip ternary %s (no code)\n",
18263 type
= builtin_function_type (insn_data
[icode
].operand
[0].mode
,
18264 insn_data
[icode
].operand
[1].mode
,
18265 insn_data
[icode
].operand
[2].mode
,
18266 insn_data
[icode
].operand
[3].mode
,
18270 def_builtin (d
->name
, type
, d
->code
);
18273 /* Add the binary operators. */
18275 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
18277 machine_mode mode0
, mode1
, mode2
;
18279 HOST_WIDE_INT mask
= d
->mask
;
18281 if ((mask
& builtin_mask
) != mask
)
18283 if (TARGET_DEBUG_BUILTIN
)
18284 fprintf (stderr
, "rs6000_builtin, skip binary %s\n", d
->name
);
18288 if (rs6000_overloaded_builtin_p (d
->code
))
18290 if (! (type
= opaque_ftype_opaque_opaque
))
18291 type
= opaque_ftype_opaque_opaque
18292 = build_function_type_list (opaque_V4SI_type_node
,
18293 opaque_V4SI_type_node
,
18294 opaque_V4SI_type_node
,
18299 enum insn_code icode
= d
->icode
;
18302 if (TARGET_DEBUG_BUILTIN
)
18303 fprintf (stderr
, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18309 if (icode
== CODE_FOR_nothing
)
18311 if (TARGET_DEBUG_BUILTIN
)
18312 fprintf (stderr
, "rs6000_builtin, skip binary %s (no code)\n",
18318 mode0
= insn_data
[icode
].operand
[0].mode
;
18319 mode1
= insn_data
[icode
].operand
[1].mode
;
18320 mode2
= insn_data
[icode
].operand
[2].mode
;
18322 if (mode0
== V2SImode
&& mode1
== V2SImode
&& mode2
== QImode
)
18324 if (! (type
= v2si_ftype_v2si_qi
))
18325 type
= v2si_ftype_v2si_qi
18326 = build_function_type_list (opaque_V2SI_type_node
,
18327 opaque_V2SI_type_node
,
18332 else if (mode0
== V2SImode
&& GET_MODE_CLASS (mode1
) == MODE_INT
18333 && mode2
== QImode
)
18335 if (! (type
= v2si_ftype_int_qi
))
18336 type
= v2si_ftype_int_qi
18337 = build_function_type_list (opaque_V2SI_type_node
,
18344 type
= builtin_function_type (mode0
, mode1
, mode2
, VOIDmode
,
18348 def_builtin (d
->name
, type
, d
->code
);
18351 /* Add the simple unary operators. */
18353 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
18355 machine_mode mode0
, mode1
;
18357 HOST_WIDE_INT mask
= d
->mask
;
18359 if ((mask
& builtin_mask
) != mask
)
18361 if (TARGET_DEBUG_BUILTIN
)
18362 fprintf (stderr
, "rs6000_builtin, skip unary %s\n", d
->name
);
18366 if (rs6000_overloaded_builtin_p (d
->code
))
18368 if (! (type
= opaque_ftype_opaque
))
18369 type
= opaque_ftype_opaque
18370 = build_function_type_list (opaque_V4SI_type_node
,
18371 opaque_V4SI_type_node
,
18376 enum insn_code icode
= d
->icode
;
18379 if (TARGET_DEBUG_BUILTIN
)
18380 fprintf (stderr
, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18386 if (icode
== CODE_FOR_nothing
)
18388 if (TARGET_DEBUG_BUILTIN
)
18389 fprintf (stderr
, "rs6000_builtin, skip unary %s (no code)\n",
18395 mode0
= insn_data
[icode
].operand
[0].mode
;
18396 mode1
= insn_data
[icode
].operand
[1].mode
;
18398 if (mode0
== V2SImode
&& mode1
== QImode
)
18400 if (! (type
= v2si_ftype_qi
))
18401 type
= v2si_ftype_qi
18402 = build_function_type_list (opaque_V2SI_type_node
,
18408 type
= builtin_function_type (mode0
, mode1
, VOIDmode
, VOIDmode
,
18412 def_builtin (d
->name
, type
, d
->code
);
18415 /* Add the simple no-argument operators. */
18417 for (i
= 0; i
< ARRAY_SIZE (bdesc_0arg
); i
++, d
++)
18419 machine_mode mode0
;
18421 HOST_WIDE_INT mask
= d
->mask
;
18423 if ((mask
& builtin_mask
) != mask
)
18425 if (TARGET_DEBUG_BUILTIN
)
18426 fprintf (stderr
, "rs6000_builtin, skip no-argument %s\n", d
->name
);
18429 if (rs6000_overloaded_builtin_p (d
->code
))
18431 if (!opaque_ftype_opaque
)
18432 opaque_ftype_opaque
18433 = build_function_type_list (opaque_V4SI_type_node
, NULL_TREE
);
18434 type
= opaque_ftype_opaque
;
18438 enum insn_code icode
= d
->icode
;
18441 if (TARGET_DEBUG_BUILTIN
)
18442 fprintf (stderr
, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18443 (long unsigned) i
);
18446 if (icode
== CODE_FOR_nothing
)
18448 if (TARGET_DEBUG_BUILTIN
)
18450 "rs6000_builtin, skip no-argument %s (no code)\n",
18454 mode0
= insn_data
[icode
].operand
[0].mode
;
18455 if (mode0
== V2SImode
)
18457 /* code for paired single */
18458 if (! (type
= v2si_ftype
))
18461 = build_function_type_list (opaque_V2SI_type_node
,
18467 type
= builtin_function_type (mode0
, VOIDmode
, VOIDmode
, VOIDmode
,
18470 def_builtin (d
->name
, type
, d
->code
);
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */
static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
        {
          set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
          set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
          set_optab_libfunc (ne_optab, mode, "__gcc_qne");
          set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
          set_optab_libfunc (ge_optab, mode, "__gcc_qge");
          set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
          set_optab_libfunc (le_optab, mode, "__gcc_qle");
          set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

          set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
          set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
          set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
          set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
          set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
          set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
          set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
          set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
        }
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");

      if (TARGET_POWERPC64)
        {
          set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
          set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
          set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
          set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
        }
    }
}
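
/* Illustration only (never compiled here): the practical effect of the
   set_optab_libfunc calls above is that an IBM double-double (__ibm128)
   operation the compiler does not expand inline is emitted as a call to the
   named support routine, e.g. __gcc_qadd for addition (or _xlqadd when
   -mxl-compat is in effect).  The example function is hypothetical.  */
#if 0
__ibm128
example_ibm128_add (__ibm128 a, __ibm128 b)
{
  return a + b;   /* may lower to a library call such as __gcc_qadd (a, b) */
}
#endif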
18535 /* Set up IEEE 128-bit floating point routines. Use different names if the
18536 arguments can be passed in a vector register. The historical PowerPC
18537 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18538 continue to use that if we aren't using vector registers to pass IEEE
18539 128-bit floating point. */
18542 init_float128_ieee (machine_mode mode
)
18544 if (FLOAT128_VECTOR_P (mode
))
18546 set_optab_libfunc (add_optab
, mode
, "__addkf3");
18547 set_optab_libfunc (sub_optab
, mode
, "__subkf3");
18548 set_optab_libfunc (neg_optab
, mode
, "__negkf2");
18549 set_optab_libfunc (smul_optab
, mode
, "__mulkf3");
18550 set_optab_libfunc (sdiv_optab
, mode
, "__divkf3");
18551 set_optab_libfunc (sqrt_optab
, mode
, "__sqrtkf2");
18552 set_optab_libfunc (abs_optab
, mode
, "__abstkf2");
18554 set_optab_libfunc (eq_optab
, mode
, "__eqkf2");
18555 set_optab_libfunc (ne_optab
, mode
, "__nekf2");
18556 set_optab_libfunc (gt_optab
, mode
, "__gtkf2");
18557 set_optab_libfunc (ge_optab
, mode
, "__gekf2");
18558 set_optab_libfunc (lt_optab
, mode
, "__ltkf2");
18559 set_optab_libfunc (le_optab
, mode
, "__lekf2");
18560 set_optab_libfunc (unord_optab
, mode
, "__unordkf2");
18562 set_conv_libfunc (sext_optab
, mode
, SFmode
, "__extendsfkf2");
18563 set_conv_libfunc (sext_optab
, mode
, DFmode
, "__extenddfkf2");
18564 set_conv_libfunc (trunc_optab
, SFmode
, mode
, "__trunckfsf2");
18565 set_conv_libfunc (trunc_optab
, DFmode
, mode
, "__trunckfdf2");
18567 set_conv_libfunc (sext_optab
, mode
, IFmode
, "__extendtfkf2");
18568 if (mode
!= TFmode
&& FLOAT128_IBM_P (TFmode
))
18569 set_conv_libfunc (sext_optab
, mode
, TFmode
, "__extendtfkf2");
18571 set_conv_libfunc (trunc_optab
, IFmode
, mode
, "__trunckftf2");
18572 if (mode
!= TFmode
&& FLOAT128_IBM_P (TFmode
))
18573 set_conv_libfunc (trunc_optab
, TFmode
, mode
, "__trunckftf2");
18575 set_conv_libfunc (sext_optab
, mode
, SDmode
, "__dpd_extendsdkf2");
18576 set_conv_libfunc (sext_optab
, mode
, DDmode
, "__dpd_extendddkf2");
18577 set_conv_libfunc (trunc_optab
, mode
, TDmode
, "__dpd_trunckftd2");
18578 set_conv_libfunc (trunc_optab
, SDmode
, mode
, "__dpd_trunckfsd2");
18579 set_conv_libfunc (trunc_optab
, DDmode
, mode
, "__dpd_trunckfdd2");
18580 set_conv_libfunc (sext_optab
, TDmode
, mode
, "__dpd_extendtdkf2");
18582 set_conv_libfunc (sfix_optab
, SImode
, mode
, "__fixkfsi");
18583 set_conv_libfunc (ufix_optab
, SImode
, mode
, "__fixunskfsi");
18584 set_conv_libfunc (sfix_optab
, DImode
, mode
, "__fixkfdi");
18585 set_conv_libfunc (ufix_optab
, DImode
, mode
, "__fixunskfdi");
18587 set_conv_libfunc (sfloat_optab
, mode
, SImode
, "__floatsikf");
18588 set_conv_libfunc (ufloat_optab
, mode
, SImode
, "__floatunsikf");
18589 set_conv_libfunc (sfloat_optab
, mode
, DImode
, "__floatdikf");
18590 set_conv_libfunc (ufloat_optab
, mode
, DImode
, "__floatundikf");
18592 if (TARGET_POWERPC64
)
18594 set_conv_libfunc (sfix_optab
, TImode
, mode
, "__fixkfti");
18595 set_conv_libfunc (ufix_optab
, TImode
, mode
, "__fixunskfti");
18596 set_conv_libfunc (sfloat_optab
, mode
, TImode
, "__floattikf");
18597 set_conv_libfunc (ufloat_optab
, mode
, TImode
, "__floatuntikf");
18603 set_optab_libfunc (add_optab
, mode
, "_q_add");
18604 set_optab_libfunc (sub_optab
, mode
, "_q_sub");
18605 set_optab_libfunc (neg_optab
, mode
, "_q_neg");
18606 set_optab_libfunc (smul_optab
, mode
, "_q_mul");
18607 set_optab_libfunc (sdiv_optab
, mode
, "_q_div");
18608 if (TARGET_PPC_GPOPT
)
18609 set_optab_libfunc (sqrt_optab
, mode
, "_q_sqrt");
18611 set_optab_libfunc (eq_optab
, mode
, "_q_feq");
18612 set_optab_libfunc (ne_optab
, mode
, "_q_fne");
18613 set_optab_libfunc (gt_optab
, mode
, "_q_fgt");
18614 set_optab_libfunc (ge_optab
, mode
, "_q_fge");
18615 set_optab_libfunc (lt_optab
, mode
, "_q_flt");
18616 set_optab_libfunc (le_optab
, mode
, "_q_fle");
18618 set_conv_libfunc (sext_optab
, mode
, SFmode
, "_q_stoq");
18619 set_conv_libfunc (sext_optab
, mode
, DFmode
, "_q_dtoq");
18620 set_conv_libfunc (trunc_optab
, SFmode
, mode
, "_q_qtos");
18621 set_conv_libfunc (trunc_optab
, DFmode
, mode
, "_q_qtod");
18622 set_conv_libfunc (sfix_optab
, SImode
, mode
, "_q_qtoi");
18623 set_conv_libfunc (ufix_optab
, SImode
, mode
, "_q_qtou");
18624 set_conv_libfunc (sfloat_optab
, mode
, SImode
, "_q_itoq");
18625 set_conv_libfunc (ufloat_optab
, mode
, SImode
, "_q_utoq");
static void
rs6000_init_libfuncs (void)
{
  /* __float128 support.  */
  if (TARGET_FLOAT128_TYPE)
    {
      init_float128_ibm (IFmode);
      init_float128_ieee (KFmode);
    }

  /* AIX/Darwin/64-bit Linux quad floating point routines.  */
  if (TARGET_LONG_DOUBLE_128)
    {
      if (!TARGET_IEEEQUAD)
        init_float128_ibm (TFmode);

      /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
      else
        init_float128_ieee (TFmode);
    }
}
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */

void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
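
/* Illustration only (hypothetical caller, never compiled): for DOT == 1 with
   CR0 the function above emits a PARALLEL of [CC := compare (src, 0);
   clobber dst], i.e. a record-form instruction whose GPR result is unused;
   for DOT == 2 the clobber is replaced by dst := src so both results are
   kept.  The rtx names below are placeholders.  */
#if 0
  /* e.g. emit an "and." whose only live result is the CR0 comparison:  */
  rtx and_rtx = gen_rtx_AND (DImode, src1, src2);
  rs6000_emit_dot_insn (dst, and_rtx, /*dot=*/1, cr0_reg);
#endif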
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
               || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
              && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
              || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
              || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
              || (code != ORDERED && code != UNORDERED
                  && code != UNEQ && code != LTGT
                  && code != UNGT && code != UNLT
                  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
              || flag_finite_math_only
              || (code != LE && code != GE
                  && code != UNEQ && code != LTGT
                  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
   rldicl, rldicr, or rldic instruction in mode MODE.  If so, if E is
   not zero, store there the bit offset (counted from the right) where
   the single stretch of 1 bits begins; and similarly for B, the bit
   offset where it ends.  */

bool
rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
{
  unsigned HOST_WIDE_INT val = INTVAL (mask);
  unsigned HOST_WIDE_INT bit;
  int nb, ne;
  int n = GET_MODE_PRECISION (mode);

  if (mode != DImode && mode != SImode)
    return false;

  if (INTVAL (mask) >= 0)
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      nb = exact_log2 (val + bit);
    }
  else if (val + 1 == 0)
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)
    {
      val = ~val;
      bit = val & -val;
      nb = exact_log2 (bit);
      ne = exact_log2 (val + bit);
    }
  else
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      if (val + bit == 0)
        nb = n;
      else
        nb = 0;
    }

  nb--;

  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return false;

  if (b)
    *b = nb;
  if (e)
    *e = ne;

  return true;
}
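
/* A simplified standalone model (illustration only, never compiled) of the
   non-wrapping case handled above: VAL is a valid mask iff its 1 bits form a
   single contiguous run, in which case adding the lowest set bit to VAL
   yields a power of two (or zero).  Function name is hypothetical.  */
#if 0
static int
example_is_contiguous_ones (unsigned long long val)
{
  if (val == 0)
    return 0;
  unsigned long long low = val & -val;   /* lowest set bit */
  unsigned long long sum = val + low;    /* clears the run if contiguous */
  return (sum & (sum - 1)) == 0;         /* power of two, or zero */
}
#endif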
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */

bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
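
/* Worked examples (illustration only) of the DImode test above, writing the
   run of ones as [ne, nb] with ne the low end and nb the high end:
     0x00000000ffffffff  ->  ne == 0              ->  single rldicl
     0xffffffff00000000  ->  nb == 63             ->  single rldicr
     0x000000000ffffff0  ->  nb < 32 && ne <= nb  ->  single rlwinm
     0x0000ffffffff0000  ->  none of the above    ->  rejected by this
                                                      predicate  */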
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
        return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
        return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
        return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
18845 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18846 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18847 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18850 rs6000_is_valid_shift_mask (rtx mask
, rtx shift
, machine_mode mode
)
18854 if (!rs6000_is_valid_mask (mask
, &nb
, &ne
, mode
))
18857 int n
= GET_MODE_PRECISION (mode
);
18860 if (CONST_INT_P (XEXP (shift
, 1)))
18862 sh
= INTVAL (XEXP (shift
, 1));
18863 if (sh
< 0 || sh
>= n
)
18867 rtx_code code
= GET_CODE (shift
);
18869 /* Convert any shift by 0 to a rotate, to simplify below code. */
18873 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18874 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& ne
>= sh
)
18876 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& nb
< sh
)
18882 /* DImode rotates need rld*. */
18883 if (mode
== DImode
&& code
== ROTATE
)
18884 return (nb
== 63 || ne
== 0 || ne
== sh
);
18886 /* SImode rotates need rlw*. */
18887 if (mode
== SImode
&& code
== ROTATE
)
18888 return (nb
< 32 && ne
< 32 && sh
< 32);
18890 /* Wrap-around masks are only okay for rotates. */
18894 /* Variable shifts are only okay for rotates. */
18898 /* Don't allow ASHIFT if the mask is wrong for that. */
18899 if (code
== ASHIFT
&& ne
< sh
)
18902 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18903 if the mask is wrong for that. */
18904 if (nb
< 32 && ne
< 32 && sh
< 32
18905 && !(code
== LSHIFTRT
&& nb
>= 32 - sh
))
18908 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18909 if the mask is wrong for that. */
18910 if (code
== LSHIFTRT
)
18912 if (nb
== 63 || ne
== 0 || ne
== sh
)
18913 return !(code
== LSHIFTRT
&& nb
>= sh
);
18918 /* Return the instruction template for a shift with mask in mode MODE, with
18919 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18922 rs6000_insn_for_shift_mask (machine_mode mode
, rtx
*operands
, bool dot
)
18926 if (!rs6000_is_valid_mask (operands
[3], &nb
, &ne
, mode
))
18927 gcc_unreachable ();
18929 if (mode
== DImode
&& ne
== 0)
18931 if (GET_CODE (operands
[4]) == LSHIFTRT
&& INTVAL (operands
[2]))
18932 operands
[2] = GEN_INT (64 - INTVAL (operands
[2]));
18933 operands
[3] = GEN_INT (63 - nb
);
18935 return "rld%I2cl. %0,%1,%2,%3";
18936 return "rld%I2cl %0,%1,%2,%3";
18939 if (mode
== DImode
&& nb
== 63)
18941 operands
[3] = GEN_INT (63 - ne
);
18943 return "rld%I2cr. %0,%1,%2,%3";
18944 return "rld%I2cr %0,%1,%2,%3";
18948 && GET_CODE (operands
[4]) != LSHIFTRT
18949 && CONST_INT_P (operands
[2])
18950 && ne
== INTVAL (operands
[2]))
18952 operands
[3] = GEN_INT (63 - nb
);
18954 return "rld%I2c. %0,%1,%2,%3";
18955 return "rld%I2c %0,%1,%2,%3";
18958 if (nb
< 32 && ne
< 32)
18960 if (GET_CODE (operands
[4]) == LSHIFTRT
&& INTVAL (operands
[2]))
18961 operands
[2] = GEN_INT (32 - INTVAL (operands
[2]));
18962 operands
[3] = GEN_INT (31 - nb
);
18963 operands
[4] = GEN_INT (31 - ne
);
18964 /* This insn can also be a 64-bit rotate with mask that really makes
18965 it just a shift right (with mask); the %h below are to adjust for
18966 that situation (shift count is >= 32 in that case). */
18968 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18969 return "rlw%I2nm %0,%1,%h2,%3,%4";
18972 gcc_unreachable ();
18975 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18976 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18977 ASHIFT, or LSHIFTRT) in mode MODE. */
18980 rs6000_is_valid_insert_mask (rtx mask
, rtx shift
, machine_mode mode
)
18984 if (!rs6000_is_valid_mask (mask
, &nb
, &ne
, mode
))
18987 int n
= GET_MODE_PRECISION (mode
);
18989 int sh
= INTVAL (XEXP (shift
, 1));
18990 if (sh
< 0 || sh
>= n
)
18993 rtx_code code
= GET_CODE (shift
);
18995 /* Convert any shift by 0 to a rotate, to simplify below code. */
18999 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19000 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& ne
>= sh
)
19002 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& nb
< sh
)
19008 /* DImode rotates need rldimi. */
19009 if (mode
== DImode
&& code
== ROTATE
)
19012 /* SImode rotates need rlwimi. */
19013 if (mode
== SImode
&& code
== ROTATE
)
19014 return (nb
< 32 && ne
< 32 && sh
< 32);
19016 /* Wrap-around masks are only okay for rotates. */
19020 /* Don't allow ASHIFT if the mask is wrong for that. */
19021 if (code
== ASHIFT
&& ne
< sh
)
19024 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
19025 if the mask is wrong for that. */
19026 if (nb
< 32 && ne
< 32 && sh
< 32
19027 && !(code
== LSHIFTRT
&& nb
>= 32 - sh
))
19030 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19031 if the mask is wrong for that. */
19032 if (code
== LSHIFTRT
)
19035 return !(code
== LSHIFTRT
&& nb
>= sh
);
19040 /* Return the instruction template for an insert with mask in mode MODE, with
19041 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19044 rs6000_insn_for_insert_mask (machine_mode mode
, rtx
*operands
, bool dot
)
19048 if (!rs6000_is_valid_mask (operands
[3], &nb
, &ne
, mode
))
19049 gcc_unreachable ();
19051 /* Prefer rldimi because rlwimi is cracked. */
19052 if (TARGET_POWERPC64
19053 && (!dot
|| mode
== DImode
)
19054 && GET_CODE (operands
[4]) != LSHIFTRT
19055 && ne
== INTVAL (operands
[2]))
19057 operands
[3] = GEN_INT (63 - nb
);
19059 return "rldimi. %0,%1,%2,%3";
19060 return "rldimi %0,%1,%2,%3";
19063 if (nb
< 32 && ne
< 32)
19065 if (GET_CODE (operands
[4]) == LSHIFTRT
&& INTVAL (operands
[2]))
19066 operands
[2] = GEN_INT (32 - INTVAL (operands
[2]));
19067 operands
[3] = GEN_INT (31 - nb
);
19068 operands
[4] = GEN_INT (31 - ne
);
19070 return "rlwimi. %0,%1,%2,%3,%4";
19071 return "rlwimi %0,%1,%2,%3,%4";
19074 gcc_unreachable ();
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */

bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;
     2) ori[s];xori[s].

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
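
/* A standalone model (illustration only, never compiled) of the
   "fill the lowest hole" arithmetic above, traced on a concrete value:
     val  = 0xff0ff0            two runs of ones with a hole between them
     bit1 = val & -val          = 0x000010   lowest set bit
     bit2 = (val + bit1) & ~val = 0x001000   lowest bit of the hole
     val1 = (val + bit1) & val  = 0xff0000   upper run only
     bit3 = val1 & -val1        = 0x010000   bit just above the hole
     val + bit3 - bit2          = 0xfffff0   one contiguous run again
   Function name is hypothetical.  */
#if 0
static unsigned long long
example_fill_lowest_hole (unsigned long long val)
{
  unsigned long long bit1 = val & -val;
  unsigned long long bit2 = (val + bit1) & ~val;
  unsigned long long val1 = (val + bit1) & val;
  unsigned long long bit3 = val1 & -val1;
  return val + bit3 - bit2;   /* mask with the lowest hole filled in */
}
#endif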
/* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
   If EXPAND is true, split rotate-and-mask instructions we generate to
   their constituent parts as well (this is used during expand); if DOT
   is 1, make the last insn a record-form instruction clobbering the
   destination GPR and setting the CC reg (from operands[3]); if 2, set
   that GPR as well as the CC reg.  */

void
rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
{
  gcc_assert (!(expand && dot));

  unsigned HOST_WIDE_INT val = INTVAL (operands[2]);

  /* If it is one stretch of ones, it is DImode; shift left, mask, then
     shift right.  This generates better code than doing the masks without
     shifts, or shifting first right and then left.  */
  int nb, ne;
  if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
    {
      gcc_assert (mode == DImode);

      int shift = 63 - nb;
      if (expand)
        {
          rtx tmp1 = gen_reg_rtx (DImode);
          rtx tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
          emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
          emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
        }
      else
        {
          rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
          tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
          emit_move_insn (operands[0], tmp);
          tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
          rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
        }

      return;
    }

  /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
     that does the rest.  */
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;

  unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
  unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;

  gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));

  /* Two "no-rotate"-and-mask instructions, for SImode.  */
  if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
    {
      gcc_assert (mode == SImode);

      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
      emit_move_insn (reg, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);

      return;
    }

  gcc_assert (mode == DImode);

  /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
     insns; we have to do the first in SImode, because it wraps.  */
  if (mask2 <= 0xffffffff
      && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
    {
      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
                             GEN_INT (mask1));
      rtx reg_low = gen_lowpart (SImode, reg);
      emit_move_insn (reg_low, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);

      return;
    }

  /* Two rld* insns: rotate, clear the hole in the middle (which now is
     at the top end), rotate back and clear the other hole.  */
  int right = exact_log2 (bit3);
  int left = 64 - right;

  /* Rotate the mask too.  */
  mask1 = (mask1 >> right) | ((bit2 - 1) << left);

  if (expand)
    {
      rtx tmp1 = gen_reg_rtx (DImode);
      rtx tmp2 = gen_reg_rtx (DImode);
      rtx tmp3 = gen_reg_rtx (DImode);
      emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
      emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
      emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
      emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
    }
  else
    {
      rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
      emit_move_insn (operands[0], tmp);
      tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
    }
}
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     insns.  */
  return 1;
}
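/* Illustrative example (not part of the original source): a pair such as

	lfd 10,8(3)
	lfd 11,16(3)

   satisfies both checks -- consecutive FP registers 10 and 11, the same base
   register r3, and the second offset exactly 8 bytes past the first -- so the
   peephole may combine the two loads into a single lfq.  */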
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode, in all other cases we can use the same mode.  */
static machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;

  return mode;
}
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was chosen.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_completed)
	return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int)rclass];
}
/* Helper function to return the cost of adding a TOC entry address.  */

static inline int
rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
{
  int ret;

  if (TARGET_CMODEL != CMODEL_SMALL)
    ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
  else
    ret = (TARGET_MINIMAL_TOC) ? 6 : 3;

  return ret;
}
/* Helper function for rs6000_secondary_reload to determine whether the memory
   address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
   needs reloading.  Return negative if the memory is not handled by the memory
   helper functions and to try a different reload method, 0 if no additional
   instructions are needed, and positive to give the extra cost for the
   memory.  */

static int
rs6000_secondary_reload_memory (rtx addr,
				enum reg_class rclass,
				machine_mode mode)
{
  int extra_cost = 0;
  rtx reg, and_arg, plus_arg0, plus_arg1;
  addr_mask_type addr_mask;
  const char *type = NULL;
  const char *fail_msg = NULL;

  if (GPR_REG_CLASS_P (rclass))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (rclass == FLOAT_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (rclass == ALTIVEC_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  /* For the combined VSX_REGS, turn off Altivec AND -16.  */
  else if (rclass == VSX_REGS)
    addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
		 & ~RELOAD_REG_AND_M16);

  /* If the register allocator hasn't made up its mind yet on the register
     class to use, settle on defaults to use.  */
  else if (rclass == NO_REGS)
    {
      addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
		   & ~RELOAD_REG_AND_M16);

      if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
	addr_mask &= ~(RELOAD_REG_INDEXED
		       | RELOAD_REG_PRE_INCDEC
		       | RELOAD_REG_PRE_MODIFY);
    }

  else
    addr_mask = 0;

  /* If the register isn't valid in this register class, just return now.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    {
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr,
		 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		 "not valid in class\n",
		 GET_MODE_NAME (mode), reg_class_names[rclass]);

      return -1;
    }

  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  We
	 don't need a scratch register, since the powerpc only supports
	 PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      reg = XEXP (addr, 0);
      if (!base_reg_operand (addr, GET_MODE (reg)))
	{
	  fail_msg = "no base register #1";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

    case PRE_MODIFY:
      reg = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);
      if (!base_reg_operand (reg, GET_MODE (reg))
	  || GET_CODE (plus_arg1) != PLUS
	  || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
	{
	  fail_msg = "bad PRE_MODIFY";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  Only allow the AND for vector sizes.  */
    case AND:
      and_arg = XEXP (addr, 0);
      if (GET_MODE_SIZE (mode) != 16
	  || GET_CODE (XEXP (addr, 1)) != CONST_INT
	  || INTVAL (XEXP (addr, 1)) != -16)
	{
	  fail_msg = "bad Altivec AND #1";
	  extra_cost = -1;
	}

      if (rclass != ALTIVEC_REGS)
	{
	  if (legitimate_indirect_address_p (and_arg, false))
	    extra_cost = 1;

	  else if (legitimate_indexed_address_p (and_arg, false))
	    extra_cost = 2;

	  else
	    {
	      fail_msg = "bad Altivec AND #2";
	      extra_cost = -1;
	    }

	  type = "and";
	}
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!legitimate_indirect_address_p (addr, false))
	{
	  extra_cost = 1;
	  type = "move";
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      plus_arg0 = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);

      /* (plus (plus (reg) (constant)) (constant)) is generated during
	 push_reload processing, so handle it now.  */
      if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset";
	    }
	}

      /* (plus (plus (reg) (constant)) (reg)) is also generated during
	 push_reload processing, so handle it now.  */
      else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      extra_cost = 1;
	      type = "indexed #2";
	    }
	}

      else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
	{
	  fail_msg = "no base register #2";
	  extra_cost = -1;
	}

      else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0
	      || !legitimate_indexed_address_p (addr, false))
	    {
	      extra_cost = 1;
	      type = "indexed";
	    }
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
	       && CONST_INT_P (plus_arg1))
	{
	  if (!quad_address_offset_p (INTVAL (plus_arg1)))
	    {
	      extra_cost = 1;
	      type = "vector d-form offset";
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset #2";
	    }
	}

      else
	{
	  fail_msg = "bad PLUS";
	  extra_cost = -1;
	}

      break;

    case LO_SUM:
      /* Quad offsets are restricted and can't handle normal addresses.  */
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum";
	}

      else if (!legitimate_lo_sum_address_p (mode, addr, false))
	{
	  fail_msg = "bad LO_SUM";
	  extra_cost = -1;
	}

      if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "lo_sum";
	}
      break;

      /* Static addresses need to create a TOC entry.  */
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #2";
	}

      else
	{
	  type = "address";
	  extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
	}
      break;

      /* TOC references look like offsetable memory.  */
    case UNSPEC:
      if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
	{
	  fail_msg = "bad UNSPEC";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #3";
	}

      else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "toc reference";
	}
      break;

    default:
      {
	fail_msg = "bad address";
	extra_cost = -1;
      }
    }

  if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
    {
      if (extra_cost < 0)
	fprintf (stderr,
		 "rs6000_secondary_reload_memory error: mode = %s, "
		 "class = %s, addr_mask = '%s', %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 (fail_msg != NULL) ? fail_msg : "<bad address>");

      else
	fprintf (stderr,
		 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		 "addr_mask = '%s', extra cost = %d, %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 extra_cost,
		 (type) ? type : "<none>");
    }

  return extra_cost;
}
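/* Illustrative example (not part of the original source; assumes an ISA 2.07
   style target where DFmode can live in the Altivec registers but only with
   indexed addressing): reloading a DFmode value into ALTIVEC_REGS from the
   address (plus (reg) (const_int 32)) reaches the "offset #2" check above,
   because the VMX addr_mask lacks RELOAD_REG_OFFSET, and the function
   reports extra_cost = 1 -- one extra instruction to rewrite the reg+offset
   address into a form the vector load can use.  */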
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */

static bool
rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode)
{
  int size = GET_MODE_SIZE (mode);

  /* Add support for various direct moves available.  In this function, we only
     look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  Originally small integers are not allowed
     in FPR/VSX registers.  Single precision binary floating is not a simple
     move because we need to convert to the single precision memory layout.
     The 4-byte SDmode can be moved.  TDmode values are disallowed since they
     need special direct move handling, which we do not support yet.  */
  if (TARGET_DIRECT_MOVE
      && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	  || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
    {
      if (TARGET_POWERPC64)
	{
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
	  if (size == 8)
	    return true;

	  /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD.  */
	  if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (TARGET_P8_VECTOR)
	{
	  if (mode == SImode)
	    return true;

	  if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (mode == SDmode)
	return true;
    }

  /* Power6+: MFTGPR or MFFGPR.  */
  else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
	   && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
	       || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  /* Move to/from SPR.  */
  else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
	   && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
	       || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  return false;
}
/* Direct move helper function for rs6000_secondary_reload, handle all of the
   special direct moves that involve allocating an extra register, return the
   insn code of the helper function if there is such a function or
   CODE_FOR_nothing if not.  */

static bool
rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode,
				     secondary_reload_info *sri,
				     bool altivec_p)
{
  bool ret = false;
  enum insn_code icode = CODE_FOR_nothing;
  int cost = 0;
  int size = GET_MODE_SIZE (mode);

  if (TARGET_POWERPC64 && size == 16)
    {
      /* Handle moving 128-bit values from GPRs to VSX point registers on
	 ISA 2.07 (power8, power9) when running in 64-bit mode using
	 XXPERMDI to glue the two 64-bit values back together.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 3;			/* 2 mtvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}

      /* Handle moving 128-bit values from VSX point registers to GPRs on
	 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
	 bottom 64-bit value.  */
      else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* 2 mfvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}
    }

  else if (TARGET_POWERPC64 && mode == SFmode)
    {
      if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* xscvdpspn, mfvsrd, and.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}

      else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 2;			/* mtvsrz, xscvspdpn.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}
    }

  else if (!TARGET_POWERPC64 && size == 8)
    {
      /* Handle moving 64-bit values from GPRs to floating point registers on
	 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
	 32-bit values back together.  Altivec register classes must be handled
	 specially since a different instruction is used, and the secondary
	 reload support requires a single instruction class in the scratch
	 register constraint.  However, right now TFmode is not allowed in
	 Altivec registers, so the pattern will never match.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
	{
	  cost = 3;			/* 2 mtvsrwz's, 1 fmrgow.  */
	  icode = reg_addr[mode].reload_fpr_gpr;
	}
    }

  if (icode != CODE_FOR_nothing)
    {
      ret = true;
      if (sri)
	{
	  sri->icode = icode;
	  sri->extra_cost = cost;
	}
    }

  return ret;
}
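/* Illustrative example (not part of the original source): on a 64-bit power8
   style target, reloading a TImode value from a GPR pair into a VSX register
   is not a simple move, so the code above picks the reload_vsx_gpr helper
   with extra_cost = 3, corresponding to two mtvsrd instructions followed by
   one xxpermdi that glues the two 64-bit halves back together.  */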
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */

static bool
rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
			      enum rs6000_reg_type from_type,
			      machine_mode mode,
			      secondary_reload_info *sri,
			      bool altivec_p)
{
  /* Fall back to load/store reloads if either type is not a register.  */
  if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
    return false;

  /* If we haven't allocated registers yet, assume the move can be done for the
     standard register types.  */
  if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
      || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
      || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
    return true;

  /* Moves to the same set of registers is a simple move for non-specialized
     registers.  */
  if (to_type == from_type && IS_STD_REG_TYPE (to_type))
    return true;

  /* Check whether a simple move can be done directly.  */
  if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
    {
      if (sri)
	{
	  sri->icode = CODE_FOR_nothing;
	  sri->extra_cost = 0;
	}
      return true;
    }

  /* Now check if we can do it in a few steps.  */
  return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
					      altivec_p);
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp+offset.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;
  bool done_p = false;

  /* Allow subreg of memory before/during reload.  */
  bool memory_p = (MEM_P (x)
		   || (!reload_completed && GET_CODE (x) == SUBREG
		       && MEM_P (SUBREG_REG (x))));

  sri->icode = CODE_FOR_nothing;
  sri->t_icode = CODE_FOR_nothing;
  sri->extra_cost = 0;
  icode = (in_p
	   ? reg_addr[mode].reload_load
	   : reg_addr[mode].reload_store);

  if (REG_P (x) || register_operand (x, mode))
    {
      enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
      bool altivec_p = (rclass == ALTIVEC_REGS);
      enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);

      if (!in_p)
	std::swap (to_type, from_type);

      /* Can we do a direct move of some sort?  */
      if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
					altivec_p))
	{
	  icode = (enum insn_code)sri->icode;
	  default_p = false;
	  done_p = true;
	  ret = NO_REGS;
	}
    }

  /* Make sure 0.0 is not reloaded or forced into memory.  */
  if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    {
      ret = NO_REGS;
      default_p = false;
      done_p = true;
    }

  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, do it via a move via a traditional floating
     point register, unless we have D-form addressing.  Also make sure that
     non-zero constants use a FPR.  */
  if (!done_p && reg_addr[mode].scalar_in_vmx_p
      && !mode_supports_vmx_dform (mode)
      && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
      && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
    {
      ret = FLOAT_REGS;
      default_p = false;
      done_p = true;
    }

  /* Handle reload of load/stores if we have reload helper functions.  */
  if (!done_p && icode != CODE_FOR_nothing && memory_p)
    {
      int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
						       mode);

      if (extra_cost >= 0)
	{
	  done_p = true;
	  ret = NO_REGS;
	  if (extra_cost > 0)
	    {
	      sri->extra_cost = extra_cost;
	      sri->icode = icode;
	    }
	}
    }

  /* Handle unaligned loads and stores of integer registers.  */
  if (!done_p && TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and the offset is not a multiple of
	     four or we have an address wrap.  Address wrap will only
	     occur for LO_SUMs since legitimate_offset_address_p
	     rejects addresses for 16-byte mems that will wrap.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? (1 /* legitimate_address_p allows any offset for lo_sum */
		 && ((offset & 3) != 0
		     || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
	      : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
		 && (offset & 3) != 0))
	    {
	      /* -m32 -mpowerpc64 needs to use a 32-bit scratch register.  */
	      if (in_p)
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
			      : CODE_FOR_reload_di_load);
	      else
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
			      : CODE_FOR_reload_di_store);
	      sri->extra_cost = 2;
	      ret = NO_REGS;
	      done_p = true;
	    }
	  else
	    default_p = true;
	}
      else
	default_p = true;
    }

  if (!done_p && !TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and we have a wrap.

	     legitimate_lo_sum_address_p allows LO_SUM addresses to
	     have any offset so test for wrap in the low 16 bits.

	     legitimate_offset_address_p checks for the range
	     [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
	     for mode size of 16.  We wrap at [0x7ffc,0x7fff] and
	     [0x7ff4,0x7fff] respectively, so test for the
	     intersection of these ranges, [0x7ffc,0x7fff] and
	     [0x7ff4,0x7ff7] respectively.

	     Note that the address we see here may have been
	     manipulated by legitimize_reload_address.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
	      : offset - (0x8000 - extra) < UNITS_PER_WORD)
	    {
	      if (in_p)
		sri->icode = CODE_FOR_reload_si_load;
	      else
		sri->icode = CODE_FOR_reload_si_store;
	      sri->extra_cost = 2;
	      ret = NO_REGS;
	      done_p = true;
	    }
	  else
	    default_p = true;
	}
      else
	default_p = true;
    }

  if (!done_p)
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (reload_completed)
	fputs (", after reload", stderr);

      if (!done_p)
	fputs (", done_p not set", stderr);

      if (default_p)
	fputs (", default secondary reload", stderr);

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d",
		 insn_data[sri->icode].name, sri->extra_cost);

      else if (sri->extra_cost > 0)
	fprintf (stderr, ", extra cost = %d", sri->extra_cost);

      fputs ("\n", stderr);
      debug_rtx (x);
    }

  return ret;
}
/* Better tracing for rs6000_secondary_reload_inner.  */

static void
rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
			       bool store_p)
{
  rtx set, clobber;

  gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);

  fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
	   store_p ? "store" : "load");

  if (store_p)
    set = gen_rtx_SET (mem, reg);
  else
    set = gen_rtx_SET (reg, mem);

  clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
  debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
}

static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
  ATTRIBUTE_NORETURN;

static void
rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
			      bool store_p)
{
  rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
  gcc_unreachable ();
}
20132 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20133 reload helper functions. These were identified in
20134 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20135 reload, it calls the insns:
20136 reload_<RELOAD:mode>_<P:mptrsize>_store
20137 reload_<RELOAD:mode>_<P:mptrsize>_load
20139 which in turn calls this function, to do whatever is necessary to create
20140 valid addresses. */
20143 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
20145 int regno
= true_regnum (reg
);
20146 machine_mode mode
= GET_MODE (reg
);
20147 addr_mask_type addr_mask
;
20150 rtx op_reg
, op0
, op1
;
20155 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
|| !MEM_P (mem
)
20156 || !base_reg_operand (scratch
, GET_MODE (scratch
)))
20157 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20159 if (IN_RANGE (regno
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
))
20160 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_GPR
];
20162 else if (IN_RANGE (regno
, FIRST_FPR_REGNO
, LAST_FPR_REGNO
))
20163 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
];
20165 else if (IN_RANGE (regno
, FIRST_ALTIVEC_REGNO
, LAST_ALTIVEC_REGNO
))
20166 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
];
20169 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20171 /* Make sure the mode is valid in this register class. */
20172 if ((addr_mask
& RELOAD_REG_VALID
) == 0)
20173 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20175 if (TARGET_DEBUG_ADDR
)
20176 rs6000_secondary_reload_trace (__LINE__
, reg
, mem
, scratch
, store_p
);
20178 new_addr
= addr
= XEXP (mem
, 0);
20179 switch (GET_CODE (addr
))
20181 /* Does the register class support auto update forms for this mode? If
20182 not, do the update now. We don't need a scratch register, since the
20183 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20186 op_reg
= XEXP (addr
, 0);
20187 if (!base_reg_operand (op_reg
, Pmode
))
20188 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20190 if ((addr_mask
& RELOAD_REG_PRE_INCDEC
) == 0)
20192 emit_insn (gen_add2_insn (op_reg
, GEN_INT (GET_MODE_SIZE (mode
))));
20198 op0
= XEXP (addr
, 0);
20199 op1
= XEXP (addr
, 1);
20200 if (!base_reg_operand (op0
, Pmode
)
20201 || GET_CODE (op1
) != PLUS
20202 || !rtx_equal_p (op0
, XEXP (op1
, 0)))
20203 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20205 if ((addr_mask
& RELOAD_REG_PRE_MODIFY
) == 0)
20207 emit_insn (gen_rtx_SET (op0
, op1
));
20212 /* Do we need to simulate AND -16 to clear the bottom address bits used
20213 in VMX load/stores? */
20215 op0
= XEXP (addr
, 0);
20216 op1
= XEXP (addr
, 1);
20217 if ((addr_mask
& RELOAD_REG_AND_M16
) == 0)
20219 if (REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
20222 else if (GET_CODE (op1
) == PLUS
)
20224 emit_insn (gen_rtx_SET (scratch
, op1
));
20229 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20231 and_op
= gen_rtx_AND (GET_MODE (scratch
), op_reg
, op1
);
20232 cc_clobber
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (CCmode
));
20233 rv
= gen_rtvec (2, gen_rtx_SET (scratch
, and_op
), cc_clobber
);
20234 emit_insn (gen_rtx_PARALLEL (VOIDmode
, rv
));
20235 new_addr
= scratch
;
20239 /* If this is an indirect address, make sure it is a base register. */
20242 if (!base_reg_operand (addr
, GET_MODE (addr
)))
20244 emit_insn (gen_rtx_SET (scratch
, addr
));
20245 new_addr
= scratch
;
20249 /* If this is an indexed address, make sure the register class can handle
20250 indexed addresses for this mode. */
20252 op0
= XEXP (addr
, 0);
20253 op1
= XEXP (addr
, 1);
20254 if (!base_reg_operand (op0
, Pmode
))
20255 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20257 else if (int_reg_operand (op1
, Pmode
))
20259 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
20261 emit_insn (gen_rtx_SET (scratch
, addr
));
20262 new_addr
= scratch
;
20266 else if (mode_supports_vsx_dform_quad (mode
) && CONST_INT_P (op1
))
20268 if (((addr_mask
& RELOAD_REG_QUAD_OFFSET
) == 0)
20269 || !quad_address_p (addr
, mode
, false))
20271 emit_insn (gen_rtx_SET (scratch
, addr
));
20272 new_addr
= scratch
;
20276 /* Make sure the register class can handle offset addresses. */
20277 else if (rs6000_legitimate_offset_address_p (mode
, addr
, false, true))
20279 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
20281 emit_insn (gen_rtx_SET (scratch
, addr
));
20282 new_addr
= scratch
;
20287 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20292 op0
= XEXP (addr
, 0);
20293 op1
= XEXP (addr
, 1);
20294 if (!base_reg_operand (op0
, Pmode
))
20295 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20297 else if (int_reg_operand (op1
, Pmode
))
20299 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
20301 emit_insn (gen_rtx_SET (scratch
, addr
));
20302 new_addr
= scratch
;
20306 /* Quad offsets are restricted and can't handle normal addresses. */
20307 else if (mode_supports_vsx_dform_quad (mode
))
20309 emit_insn (gen_rtx_SET (scratch
, addr
));
20310 new_addr
= scratch
;
20313 /* Make sure the register class can handle offset addresses. */
20314 else if (legitimate_lo_sum_address_p (mode
, addr
, false))
20316 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
20318 emit_insn (gen_rtx_SET (scratch
, addr
));
20319 new_addr
= scratch
;
20324 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20331 rs6000_emit_move (scratch
, addr
, Pmode
);
20332 new_addr
= scratch
;
20336 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
20339 /* Adjust the address if it changed. */
20340 if (addr
!= new_addr
)
20342 mem
= replace_equiv_address_nv (mem
, new_addr
);
20343 if (TARGET_DEBUG_ADDR
)
20344 fprintf (stderr
, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20347 /* Now create the move. */
20349 emit_insn (gen_rtx_SET (mem
, reg
));
20351 emit_insn (gen_rtx_SET (reg
, mem
));
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      gcc_assert (REG_P (XEXP (addr, 0))
		  && GET_CODE (XEXP (addr, 1)) == PLUS
		  && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
      scratch_or_premodify = XEXP (addr, 0);
      if (!HARD_REGISTER_P (scratch_or_premodify))
	/* If we have a pseudo here then reload will have arranged
	   to have it replaced, but only in the original insn.
	   Use the replacement here too.  */
	scratch_or_premodify = find_replacement (&XEXP (addr, 0));

      /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
	 expressions from the original insn, without unsharing them.
	 Any RTL that points into the original insn will of course
	 have register replacements applied.  That is why we don't
	 need to look for replacements under the PLUS.  */
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?
 */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  machine_mode mode = GET_MODE (x);
  bool is_constant = CONSTANT_P (x);

  /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
     reload class for it.  */
  if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS.  Do not allow
     the reloading of address expressions using PLUS into floating point
     registers.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
    {
      /* Zero is always allowed in all VSX registers.  */
      if (x == CONST0_RTX (mode))
	return rclass;

      /* If this is a vector constant that can be formed with a few Altivec
	 instructions, we want altivec registers.  */
      if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
	return ALTIVEC_REGS;

      /* If this is an integer constant that can easily be loaded into
	 vector registers, allow it.  */
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT value = INTVAL (x);

	  /* ISA 2.07 can generate -1 in all registers with XXLORC.  ISA
	     2.06 can generate it in the Altivec registers with
	     VSPLTI<x>.  */
	  if (value == -1)
	    {
	      if (TARGET_P8_VECTOR)
		return rclass;
	      else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
		return ALTIVEC_REGS;
	      else
		return NO_REGS;
	    }

	  /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
	     a sign extend in the Altivec registers.  */
	  if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
	      && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
	    return ALTIVEC_REGS;
	}

      /* Force constant to memory.  */
      if (is_constant)
	return NO_REGS;

      /* D-form addressing can easily reload the value.  */
      if (mode_supports_vmx_dform (mode)
	  || mode_supports_vsx_dform_quad (mode))
	return rclass;

      /* If this is a scalar floating point value and we don't have D-form
	 addressing, prefer the traditional floating point registers so that we
	 can use D-form (register+offset) addressing.  */
      if (rclass == VSX_REGS
	  && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
	return FLOAT_REGS;

      /* Prefer the Altivec registers if Altivec is handling the vector
	 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
	 loads.  */
      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
	  || mode == V1TImode)
	return ALTIVEC_REGS;

      return rclass;
    }

  if (is_constant || GET_CODE (x) == PLUS)
    {
      if (reg_class_subset_p (GENERAL_REGS, rclass))
	return GENERAL_REGS;
      if (reg_class_subset_p (BASE_REGS, rclass))
	return BASE_REGS;
      return NO_REGS;
    }

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  return rclass;
}

/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (machine_mode mode,
				reg_class_t from_class,
				reg_class_t to_class)
{
  enum rs6000_reg_type from_type, to_type;
  bool altivec_p = ((from_class == ALTIVEC_REGS)
		    || (to_class == ALTIVEC_REGS));

  /* If a simple/direct move is available, we don't need secondary memory  */
  from_type = reg_class_to_reg_type[(int)from_class];
  to_type = reg_class_to_reg_type[(int)to_class];

  if (rs6000_secondary_reload_move (to_type, from_type, mode,
				    (secondary_reload_info *)0, altivec_p))
    return false;

  /* If we have a floating point or vector register class, we need to use
     memory to transfer the data.  */
  if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
    return true;

  return false;
}

/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (machine_mode mode,
				      reg_class_t from_class,
				      reg_class_t to_class)
{
  bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
	   "to_class = %s, mode = %s\n",
	   ret ? "true" : "false",
	   reg_class_names[from_class],
	   reg_class_names[to_class],
	   GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* If we have VSX register moves, prefer moving scalar values between
     Altivec registers and GPR by going via an FPR (and then via memory)
     instead of reloading the secondary memory address for Altivec moves.  */
  if (TARGET_VSX
      && GET_MODE_SIZE (mode) < 16
      && !mode_supports_vmx_dform (mode)
      && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
	   && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
	  || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
	      && (regno >= 0 && INT_REGNO_P (regno)))))
    return FLOAT_REGS;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and VSX registers can go into VSX registers (both the
     traditional floating point and the altivec registers).  */
  if (rclass == VSX_REGS
      && (regno == -1 || VSX_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}

/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
rs6000_can_change_mode_class (machine_mode from,
			      machine_mode to,
			      reg_class_t rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;

      if (reg_classes_intersect_p (xclass, rclass))
	{
	  unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
	  unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
	  bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
	  bool from_float128_vector_p = FLOAT128_VECTOR_P (from);

	  /* Don't allow 64-bit types to overlap with 128-bit types that take a
	     single register under VSX because the scalar part of the register
	     is in the upper 64-bits, and not the lower 64-bits.  Types like
	     TFmode/TDmode that take 2 scalar register can overlap.  128-bit
	     IEEE floating point can't overlap, and neither can small
	     values.  */

	  if (to_float128_vector_p && from_float128_vector_p)
	    return true;

	  else if (to_float128_vector_p || from_float128_vector_p)
	    return false;

	  /* TDmode in floating-mode registers must always go into a register
	     pair with the most significant word in the even-numbered register
	     to match ISA requirements.  In little-endian mode, this does not
	     match subreg numbering, so we cannot allow subregs.  */
	  if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
	    return false;

	  if (from_size < 8 || to_size < 8)
	    return false;

	  if (from_size == 8 && (8 * to_nregs) != to_size)
	    return false;

	  if (to_size == 8 && (8 * from_nregs) != from_size)
	    return false;

	  return true;
	}
      else
	return true;
    }

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  For types like TFmode and TDmode
     that take 2 64-bit registers, rather than a single 128-bit register, don't
     allow subregs of those types to other 128 bit types.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    {
      unsigned num_regs = (from_size + 15) / 16;
      if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
	  || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
	return false;

      return (from_size == 8 || from_size == 16);
    }

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return false;

  return true;
}

/* Debug version of rs6000_can_change_mode_class.  */
static bool
rs6000_debug_can_change_mode_class (machine_mode from,
				    machine_mode to,
				    reg_class_t rclass)
{
  bool ret = rs6000_can_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_can_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
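/* Illustrative example (not part of the original source): under VSX a
   DImode subreg of a DFmode value is allowed (same 8-byte size, one
   register), but a DImode subreg of a 128-bit KFmode value is rejected by
   the size-changing checks above, since the 64-bit scalar sits in the upper
   half of the VSX register and would not line up with subreg numbering.  */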
20809 /* Return a string to do a move operation of 128 bits of data. */
20812 rs6000_output_move_128bit (rtx operands
[])
20814 rtx dest
= operands
[0];
20815 rtx src
= operands
[1];
20816 machine_mode mode
= GET_MODE (dest
);
20819 bool dest_gpr_p
, dest_fp_p
, dest_vmx_p
, dest_vsx_p
;
20820 bool src_gpr_p
, src_fp_p
, src_vmx_p
, src_vsx_p
;
20824 dest_regno
= REGNO (dest
);
20825 dest_gpr_p
= INT_REGNO_P (dest_regno
);
20826 dest_fp_p
= FP_REGNO_P (dest_regno
);
20827 dest_vmx_p
= ALTIVEC_REGNO_P (dest_regno
);
20828 dest_vsx_p
= dest_fp_p
| dest_vmx_p
;
20833 dest_gpr_p
= dest_fp_p
= dest_vmx_p
= dest_vsx_p
= false;
20838 src_regno
= REGNO (src
);
20839 src_gpr_p
= INT_REGNO_P (src_regno
);
20840 src_fp_p
= FP_REGNO_P (src_regno
);
20841 src_vmx_p
= ALTIVEC_REGNO_P (src_regno
);
20842 src_vsx_p
= src_fp_p
| src_vmx_p
;
20847 src_gpr_p
= src_fp_p
= src_vmx_p
= src_vsx_p
= false;
20850 /* Register moves. */
20851 if (dest_regno
>= 0 && src_regno
>= 0)
20858 if (TARGET_DIRECT_MOVE_128
&& src_vsx_p
)
20859 return (WORDS_BIG_ENDIAN
20860 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20861 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20863 else if (TARGET_VSX
&& TARGET_DIRECT_MOVE
&& src_vsx_p
)
20867 else if (TARGET_VSX
&& dest_vsx_p
)
20870 return "xxlor %x0,%x1,%x1";
20872 else if (TARGET_DIRECT_MOVE_128
&& src_gpr_p
)
20873 return (WORDS_BIG_ENDIAN
20874 ? "mtvsrdd %x0,%1,%L1"
20875 : "mtvsrdd %x0,%L1,%1");
20877 else if (TARGET_DIRECT_MOVE
&& src_gpr_p
)
20881 else if (TARGET_ALTIVEC
&& dest_vmx_p
&& src_vmx_p
)
20882 return "vor %0,%1,%1";
20884 else if (dest_fp_p
&& src_fp_p
)
20889 else if (dest_regno
>= 0 && MEM_P (src
))
20893 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20899 else if (TARGET_ALTIVEC
&& dest_vmx_p
20900 && altivec_indexed_or_indirect_operand (src
, mode
))
20901 return "lvx %0,%y1";
20903 else if (TARGET_VSX
&& dest_vsx_p
)
20905 if (mode_supports_vsx_dform_quad (mode
)
20906 && quad_address_p (XEXP (src
, 0), mode
, true))
20907 return "lxv %x0,%1";
20909 else if (TARGET_P9_VECTOR
)
20910 return "lxvx %x0,%y1";
20912 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20913 return "lxvw4x %x0,%y1";
20916 return "lxvd2x %x0,%y1";
20919 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
20920 return "lvx %0,%y1";
20922 else if (dest_fp_p
)
20927 else if (src_regno
>= 0 && MEM_P (dest
))
20931 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20932 return "stq %1,%0";
20937 else if (TARGET_ALTIVEC
&& src_vmx_p
20938 && altivec_indexed_or_indirect_operand (src
, mode
))
20939 return "stvx %1,%y0";
20941 else if (TARGET_VSX
&& src_vsx_p
)
20943 if (mode_supports_vsx_dform_quad (mode
)
20944 && quad_address_p (XEXP (dest
, 0), mode
, true))
20945 return "stxv %x1,%0";
20947 else if (TARGET_P9_VECTOR
)
20948 return "stxvx %x1,%y0";
20950 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20951 return "stxvw4x %x1,%y0";
20954 return "stxvd2x %x1,%y0";
20957 else if (TARGET_ALTIVEC
&& src_vmx_p
)
20958 return "stvx %1,%y0";
20965 else if (dest_regno
>= 0
20966 && (GET_CODE (src
) == CONST_INT
20967 || GET_CODE (src
) == CONST_WIDE_INT
20968 || GET_CODE (src
) == CONST_DOUBLE
20969 || GET_CODE (src
) == CONST_VECTOR
))
20974 else if ((dest_vmx_p
&& TARGET_ALTIVEC
)
20975 || (dest_vsx_p
&& TARGET_VSX
))
20976 return output_vec_const_move (operands
);
20979 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest
, src
));
20982 /* Validate a 128-bit move. */
20984 rs6000_move_128bit_ok_p (rtx operands
[])
20986 machine_mode mode
= GET_MODE (operands
[0]);
20987 return (gpc_reg_operand (operands
[0], mode
)
20988 || gpc_reg_operand (operands
[1], mode
));
20991 /* Return true if a 128-bit move needs to be split. */
20993 rs6000_split_128bit_ok_p (rtx operands
[])
20995 if (!reload_completed
)
20998 if (!gpr_or_gpr_p (operands
[0], operands
[1]))
21001 if (quad_load_store_p (operands
[0], operands
[1]))
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
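/* Illustrative worked example (not part of the original source): for a GT
   comparison on cr7, base_bit = 4 * (cr7 - cr0) = 28 and the function
   returns 29, the GT bit of that CR field.  When SCC_P is nonzero, the
   combined conditions (NE, GE/GEU, LE/LEU) instead report base_bit + 3, the
   bit that the preceding cror placed in the unordered position.  */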
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_cleared_alloc<machine_function> ();
}
21101 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21103 /* Write out a function code label. */
21106 rs6000_output_function_entry (FILE *file
, const char *fname
)
21108 if (fname
[0] != '.')
21110 switch (DEFAULT_ABI
)
21113 gcc_unreachable ();
21119 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
21129 RS6000_OUTPUT_BASENAME (file
, fname
);
21132 /* Print an operand. Recognize special options, documented below. */
21135 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21136 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21138 #define SMALL_DATA_RELOC "sda21"
21139 #define SMALL_DATA_REG 0
21143 print_operand (FILE *file
, rtx x
, int code
)
21146 unsigned HOST_WIDE_INT uval
;
21150 /* %a is output_address. */
21152 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21156 /* Like 'J' but get to the GT bit only. */
21157 gcc_assert (REG_P (x
));
21159 /* Bit 1 is GT bit. */
21160 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
21162 /* Add one for shift count in rlinm for scc. */
21163 fprintf (file
, "%d", i
+ 1);
21167 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21170 output_operand_lossage ("invalid %%e value");
21175 if ((uval
& 0xffff) == 0 && uval
!= 0)
21180 /* X is a CR register. Print the number of the EQ bit of the CR */
21181 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21182 output_operand_lossage ("invalid %%E value");
21184 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
21188 /* X is a CR register. Print the shift count needed to move it
21189 to the high-order four bits. */
21190 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21191 output_operand_lossage ("invalid %%f value");
21193 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
21197 /* Similar, but print the count for the rotate in the opposite
21199 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21200 output_operand_lossage ("invalid %%F value");
21202 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
21206 /* X is a constant integer. If it is negative, print "m",
21207 otherwise print "z". This is to make an aze or ame insn. */
21208 if (GET_CODE (x
) != CONST_INT
)
21209 output_operand_lossage ("invalid %%G value");
21210 else if (INTVAL (x
) >= 0)
21217 /* If constant, output low-order five bits. Otherwise, write
21220 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
21222 print_operand (file
, x
, 0);
21226 /* If constant, output low-order six bits. Otherwise, write
21229 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
21231 print_operand (file
, x
, 0);
21235 /* Print `i' if this is a constant, else nothing. */
21241 /* Write the bit number in CCR for jump. */
21242 i
= ccr_bit (x
, 0);
21244 output_operand_lossage ("invalid %%j code");
21246 fprintf (file
, "%d", i
);
21250 /* Similar, but add one for shift count in rlinm for scc and pass
21251 scc flag to `ccr_bit'. */
21252 i
= ccr_bit (x
, 1);
21254 output_operand_lossage ("invalid %%J code");
21256 /* If we want bit 31, write a shift count of zero, not 32. */
21257 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21261 /* X must be a constant. Write the 1's complement of the
21264 output_operand_lossage ("invalid %%k value");
21266 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
21270 /* X must be a symbolic constant on ELF. Write an
21271 expression suitable for an 'addi' that adds in the low 16
21272 bits of the MEM. */
21273 if (GET_CODE (x
) == CONST
)
21275 if (GET_CODE (XEXP (x
, 0)) != PLUS
21276 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
21277 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
21278 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
21279 output_operand_lossage ("invalid %%K value");
21281 print_operand_address (file
, x
);
21282 fputs ("@l", file
);
21285 /* %l is output_asm_label. */
21288 /* Write second word of DImode or DFmode reference. Works on register
21289 or non-indexed memory only. */
21291 fputs (reg_names
[REGNO (x
) + 1], file
);
21292 else if (MEM_P (x
))
21294 machine_mode mode
= GET_MODE (x
);
21295 /* Handle possible auto-increment. Since it is pre-increment and
21296 we have already done it, we can just use an offset of word. */
21297 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21298 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21299 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21301 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21302 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21305 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
,
21309 if (small_data_operand (x
, GET_MODE (x
)))
21310 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21311 reg_names
[SMALL_DATA_REG
]);
21316 /* Write the number of elements in the vector times 4. */
21317 if (GET_CODE (x
) != PARALLEL
)
21318 output_operand_lossage ("invalid %%N value");
21320 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
21324 /* Similar, but subtract 1 first. */
21325 if (GET_CODE (x
) != PARALLEL
)
21326 output_operand_lossage ("invalid %%O value");
21328 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
21332 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21335 || (i
= exact_log2 (INTVAL (x
))) < 0)
21336 output_operand_lossage ("invalid %%p value");
21338 fprintf (file
, "%d", i
);
21342 /* The operand must be an indirect memory reference. The result
21343 is the register name. */
21344 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
21345 || REGNO (XEXP (x
, 0)) >= 32)
21346 output_operand_lossage ("invalid %%P value");
21348 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
21352 /* This outputs the logical code corresponding to a boolean
21353 expression. The expression may have one or both operands
21354 negated (if one, only the first one). For condition register
21355 logical operations, it will also treat the negated
21356 CR codes as NOTs, but not handle NOTs of them. */
21358 const char *const *t
= 0;
21360 enum rtx_code code
= GET_CODE (x
);
21361 static const char * const tbl
[3][3] = {
21362 { "and", "andc", "nor" },
21363 { "or", "orc", "nand" },
21364 { "xor", "eqv", "xor" } };
21368 else if (code
== IOR
)
21370 else if (code
== XOR
)
21373 output_operand_lossage ("invalid %%q value");
21375 if (GET_CODE (XEXP (x
, 0)) != NOT
)
21379 if (GET_CODE (XEXP (x
, 1)) == NOT
)
21390 if (! TARGET_MFCRF
)
21396 /* X is a CR register. Print the mask for `mtcrf'. */
21397 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21398 output_operand_lossage ("invalid %%R value");
21400 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
21404 /* Low 5 bits of 32 - value */
21406 output_operand_lossage ("invalid %%s value");
21408 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
21412 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21413 gcc_assert (REG_P (x
) && GET_MODE (x
) == CCmode
);
21415 /* Bit 3 is OV bit. */
21416 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
21418 /* If we want bit 31, write a shift count of zero, not 32. */
21419 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21423 /* Print the symbolic name of a branch target register. */
21424 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
21425 && REGNO (x
) != CTR_REGNO
))
21426 output_operand_lossage ("invalid %%T value");
21427 else if (REGNO (x
) == LR_REGNO
)
21428 fputs ("lr", file
);
21430 fputs ("ctr", file
);
      /* High-order or low-order 16 bits of constant, whichever is non-zero,
	 for use in unsigned operand.  */
	output_operand_lossage ("invalid %%u value");
      if ((uval & 0xffff) == 0)
      fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);

      /* High-order 16 bits of constant for use in signed operand.  */
	output_operand_lossage ("invalid %%v value");
      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
	       (INTVAL (x) >> 16) & 0xffff);

      /* Print `u' if this has an auto-increment or auto-decrement.  */
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))

      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	  fputs ("eq", file);   /* 4 */
	  fputs ("ne", file);   /* 24 */
	  fputs ("lt", file);   /* 16 */
	  fputs ("le", file);   /* 20 */
	  fputs ("gt", file);   /* 8 */
	  fputs ("ge", file);   /* 12 */
	  fputs ("llt", file);  /* 2 */
	  fputs ("lle", file);  /* 6 */
	  fputs ("lgt", file);  /* 1 */
	  fputs ("lge", file);  /* 5 */
	  gcc_unreachable ();

      /* If constant, low-order 16 bits of constant, signed.  Otherwise,
	 write normally.  */
      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
	       ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
      print_operand (file, x, 0);
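      /* Clarifying comment: the ((v & 0xffff) ^ 0x8000) - 0x8000 idiom above
	 is the usual trick for sign-extending the low 16 bits of V, so the
	 printed value matches what a signed 16-bit immediate field holds.  */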
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      int reg = REGNO (x);
      int vsx_reg = (FP_REGNO_P (reg)
		     : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
      if (TARGET_REGNAMES)
	fprintf (file, "%%vs%d", vsx_reg);
      fprintf (file, "%d", vsx_reg);

	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1),
						   0))))
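      /* Clarifying comment: in the VSX numbering used above the 32 FPRs
	 alias vector-scalar registers vs0..vs31 and the 32 AltiVec
	 registers alias vs32..vs63, hence the "+ 32" for AltiVec
	 registers.  */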
21545 /* Like 'L', for third word of TImode/PTImode */
21547 fputs (reg_names
[REGNO (x
) + 2], file
);
21548 else if (MEM_P (x
))
21550 machine_mode mode
= GET_MODE (x
);
21551 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21552 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21553 output_address (mode
, plus_constant (Pmode
,
21554 XEXP (XEXP (x
, 0), 0), 8));
21555 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21556 output_address (mode
, plus_constant (Pmode
,
21557 XEXP (XEXP (x
, 0), 0), 8));
21559 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 8), 0));
21560 if (small_data_operand (x
, GET_MODE (x
)))
21561 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21562 reg_names
[SMALL_DATA_REG
]);
21567 /* X is a SYMBOL_REF. Write out the name preceded by a
21568 period and without any trailing data in brackets. Used for function
21569 names. If we are configured for System V (or the embedded ABI) on
21570 the PowerPC, do not emit the period, since those systems do not use
21571 TOCs and the like. */
21572 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
21574 /* For macho, check to see if we need a stub. */
21577 const char *name
= XSTR (x
, 0);
21579 if (darwin_emit_branch_islands
21580 && MACHOPIC_INDIRECT
21581 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
21582 name
= machopic_indirection_name (x
, /*stub_p=*/true);
21584 assemble_name (file
, name
);
21586 else if (!DOT_SYMBOLS
)
21587 assemble_name (file
, XSTR (x
, 0));
21589 rs6000_output_function_entry (file
, XSTR (x
, 0));
21593 /* Like 'L', for last word of TImode/PTImode. */
21595 fputs (reg_names
[REGNO (x
) + 3], file
);
21596 else if (MEM_P (x
))
21598 machine_mode mode
= GET_MODE (x
);
21599 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21600 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21601 output_address (mode
, plus_constant (Pmode
,
21602 XEXP (XEXP (x
, 0), 0), 12));
21603 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21604 output_address (mode
, plus_constant (Pmode
,
21605 XEXP (XEXP (x
, 0), 0), 12));
21607 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 12), 0));
21608 if (small_data_operand (x
, GET_MODE (x
)))
21609 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21610 reg_names
[SMALL_DATA_REG
]);
      /* Print AltiVec memory operand.  */
      gcc_assert (MEM_P (x));

      if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
	  && GET_CODE (tmp) == AND
	  && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	  && INTVAL (XEXP (tmp, 1)) == -16)
	tmp = XEXP (tmp, 0);
      else if (VECTOR_MEM_VSX_P (GET_MODE (x))
	       && GET_CODE (tmp) == PRE_MODIFY)
	tmp = XEXP (tmp, 1);

	fprintf (file, "0,%s", reg_names[REGNO (tmp)]);

      if (GET_CODE (tmp) != PLUS
	  || !REG_P (XEXP (tmp, 0))
	  || !REG_P (XEXP (tmp, 1)))
	output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");

      if (REGNO (XEXP (tmp, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		 reg_names[ REGNO (XEXP (tmp, 0)) ]);
      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
	       reg_names[ REGNO (XEXP (tmp, 1)) ]);
      fprintf (file, "%s", reg_names[REGNO (x)]);
    else if (MEM_P (x))
      /* We need to handle PRE_INC and PRE_DEC here, since we need to
	 know the width from the mode.  */
      if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
      else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
      else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
	output_address (GET_MODE (x), XEXP (x, 0));

      if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
	/* This hack along with a corresponding hack in
	   rs6000_output_addr_const_extra arranges to output addends
	   where the assembler expects to find them.  eg.
	   (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	   without this hack would be output as "x@toc+4".  We
	   want "x+4@toc".  */
	output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
	output_addr_const (file, x);

      if (const char *name = get_some_local_dynamic_name ())
	assemble_name (file, name);
	output_operand_lossage ("'%%&' used without any "
				"local dynamic TLS references");

      output_operand_lossage ("invalid %%xn code");
/* Print the address of an operand.  */

print_operand_address (FILE *file, rtx x)
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
	gcc_assert (!TARGET_TOC);
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
  else if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				&tocrel_offset_oac))
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
	fprintf (file, "(%s)",
		 reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
    gcc_unreachable ();
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

rs6000_output_addr_const_extra (FILE *file, rtx x)
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
	    if (INTVAL (tocrel_offset_oac) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	    assemble_name (file, toc_label_name);
	else if (TARGET_ELF)
	  fputs ("@toc", file);

      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	machopic_output_function_base_name (file);
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   systems.  */

rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the
	 text section.  */
      if (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && in_section != toc_section
	  && !CONST_SCALAR_INT_P (x)
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");

      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	  fprintf (asm_out_file, "\t.long\t%s\n", name);
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
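/* Clarifying comment: each ".fixup" entry emitted above is just the address
   (the LCP label) of a word whose value was written with the @fixup
   relocation, i.e. a word the -mrelocatable startup code must adjust at load
   time; the TOC section is skipped because it is already relocated.  */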
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

rs6000_assemble_visibility (tree decl, int vis)
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && TREE_CODE (decl) == FUNCTION_DECL)
      static const char * const visibility_types[] = {
	NULL, "protected", "hidden", "internal"
      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    default_assemble_visibility (decl, vis);
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
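/* Clarifying example: reversing LT under CCFPmode yields UNGE rather than GE,
   so the reversed test is true exactly when the original LT is false,
   including when an operand is a NaN; plain reverse_condition would lose
   that property.  */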
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

rs6000_generate_compare (rtx cmp, machine_mode mode)
  machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    comp_mode = CCmode;
  else if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value
     as an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* IEEE 128-bit support in VSX registers when we do not have hardware
     support.  */
  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
      rtx libfunc = NULL_RTX;
      bool check_nan = false;

	  libfunc = optab_libfunc (eq_optab, mode);
	  libfunc = optab_libfunc (ge_optab, mode);
	  libfunc = optab_libfunc (le_optab, mode);

	  libfunc = optab_libfunc (unord_optab, mode);
	  code = (code == UNORDERED) ? NE : EQ;

	  libfunc = optab_libfunc (ge_optab, mode);
	  code = (code == UNGE) ? GE : GT;

	  libfunc = optab_libfunc (le_optab, mode);
	  code = (code == UNLE) ? LE : LT;

	  libfunc = optab_libfunc (eq_optab, mode);
	  code = (code == UNEQ) ? EQ : NE;

	  gcc_unreachable ();

      gcc_assert (libfunc);

      dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				      SImode, op0, mode, op1, mode);
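      /* Clarifying comment: without hardware IEEE-128 support the compare is
	 lowered to a call to the optab library routine selected above; the
	 routine's SImode result is then compared against zero below, and for
	 the check_nan cases an "unordered" call is made first so signalling
	 NaNs are handled before the main comparison.  */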
22020 /* The library signals an exception for signalling NaNs, so we need to
22021 handle isgreater, etc. by first checking isordered. */
22024 rtx ne_rtx
, normal_dest
, unord_dest
;
22025 rtx unord_func
= optab_libfunc (unord_optab
, mode
);
22026 rtx join_label
= gen_label_rtx ();
22027 rtx join_ref
= gen_rtx_LABEL_REF (VOIDmode
, join_label
);
22028 rtx unord_cmp
= gen_reg_rtx (comp_mode
);
22031 /* Test for either value being a NaN. */
22032 gcc_assert (unord_func
);
22033 unord_dest
= emit_library_call_value (unord_func
, NULL_RTX
, LCT_CONST
,
22034 SImode
, op0
, mode
, op1
, mode
);
22036 /* Set value (0) if either value is a NaN, and jump to the join
22038 dest
= gen_reg_rtx (SImode
);
22039 emit_move_insn (dest
, const1_rtx
);
22040 emit_insn (gen_rtx_SET (unord_cmp
,
22041 gen_rtx_COMPARE (comp_mode
, unord_dest
,
22044 ne_rtx
= gen_rtx_NE (comp_mode
, unord_cmp
, const0_rtx
);
22045 emit_jump_insn (gen_rtx_SET (pc_rtx
,
22046 gen_rtx_IF_THEN_ELSE (VOIDmode
, ne_rtx
,
22050 /* Do the normal comparison, knowing that the values are not
22052 normal_dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
22053 SImode
, op0
, mode
, op1
, mode
);
22055 emit_insn (gen_cstoresi4 (dest
,
22056 gen_rtx_fmt_ee (code
, SImode
, normal_dest
,
22058 normal_dest
, const0_rtx
));
	  /* Join NaN and non-NaN paths.  Compare dest against 0.  */
22061 emit_label (join_label
);
22065 emit_insn (gen_rtx_SET (compare_result
,
22066 gen_rtx_COMPARE (comp_mode
, dest
, const0_rtx
)));
22071 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22072 CLOBBERs to match cmptf_internal2 pattern. */
22073 if (comp_mode
== CCFPmode
&& TARGET_XL_COMPAT
22074 && FLOAT128_IBM_P (GET_MODE (op0
))
22075 && TARGET_HARD_FLOAT
)
22076 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
22078 gen_rtx_SET (compare_result
,
22079 gen_rtx_COMPARE (comp_mode
, op0
, op1
)),
22080 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22081 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22082 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22083 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22084 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22085 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22086 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22087 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
22088 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (Pmode
)))));
22089 else if (GET_CODE (op1
) == UNSPEC
22090 && XINT (op1
, 1) == UNSPEC_SP_TEST
)
22092 rtx op1b
= XVECEXP (op1
, 0, 0);
22093 comp_mode
= CCEQmode
;
22094 compare_result
= gen_reg_rtx (CCEQmode
);
22096 emit_insn (gen_stack_protect_testdi (compare_result
, op0
, op1b
));
22098 emit_insn (gen_stack_protect_testsi (compare_result
, op0
, op1b
));
22101 emit_insn (gen_rtx_SET (compare_result
,
22102 gen_rtx_COMPARE (comp_mode
, op0
, op1
)));
22105 /* Some kinds of FP comparisons need an OR operation;
22106 under flag_finite_math_only we don't bother. */
22107 if (FLOAT_MODE_P (mode
)
22108 && (!FLOAT128_IEEE_P (mode
) || TARGET_FLOAT128_HW
)
22109 && !flag_finite_math_only
22110 && (code
== LE
|| code
== GE
22111 || code
== UNEQ
|| code
== LTGT
22112 || code
== UNGT
|| code
== UNLT
))
22114 enum rtx_code or1
, or2
;
22115 rtx or1_rtx
, or2_rtx
, compare2_rtx
;
22116 rtx or_result
= gen_reg_rtx (CCEQmode
);
22120 case LE
: or1
= LT
; or2
= EQ
; break;
22121 case GE
: or1
= GT
; or2
= EQ
; break;
22122 case UNEQ
: or1
= UNORDERED
; or2
= EQ
; break;
22123 case LTGT
: or1
= LT
; or2
= GT
; break;
22124 case UNGT
: or1
= UNORDERED
; or2
= GT
; break;
22125 case UNLT
: or1
= UNORDERED
; or2
= LT
; break;
22126 default: gcc_unreachable ();
22128 validate_condition_mode (or1
, comp_mode
);
22129 validate_condition_mode (or2
, comp_mode
);
22130 or1_rtx
= gen_rtx_fmt_ee (or1
, SImode
, compare_result
, const0_rtx
);
22131 or2_rtx
= gen_rtx_fmt_ee (or2
, SImode
, compare_result
, const0_rtx
);
22132 compare2_rtx
= gen_rtx_COMPARE (CCEQmode
,
22133 gen_rtx_IOR (SImode
, or1_rtx
, or2_rtx
),
22135 emit_insn (gen_rtx_SET (or_result
, compare2_rtx
));
22137 compare_result
= or_result
;
22141 validate_condition_mode (code
, GET_MODE (compare_result
));
22143 return gen_rtx_fmt_ee (code
, VOIDmode
, compare_result
, const0_rtx
);
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)

  if (!TARGET_FLOAT128_CVT)
      if ((mode1 == KFmode && mode2 == IFmode)
	  || (mode1 == IFmode && mode2 == KFmode))
	return N_("__float128 and __ibm128 cannot be used in the same "
		  "expression");

      if (TARGET_IEEEQUAD
	  && ((mode1 == IFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == IFmode)))
	return N_("__ibm128 and long double cannot be used in the same "
		  "expression");

      if (!TARGET_IEEEQUAD
	  && ((mode1 == KFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == KFmode)))
	return N_("__float128 and long double cannot be used in the same "
		  "expression");
22194 /* Expand floating point conversion to/from __float128 and __ibm128. */
22197 rs6000_expand_float128_convert (rtx dest
, rtx src
, bool unsigned_p
)
22199 machine_mode dest_mode
= GET_MODE (dest
);
22200 machine_mode src_mode
= GET_MODE (src
);
22201 convert_optab cvt
= unknown_optab
;
22202 bool do_move
= false;
22203 rtx libfunc
= NULL_RTX
;
22205 typedef rtx (*rtx_2func_t
) (rtx
, rtx
);
22206 rtx_2func_t hw_convert
= (rtx_2func_t
)0;
22210 rtx_2func_t from_df
;
22211 rtx_2func_t from_sf
;
22212 rtx_2func_t from_si_sign
;
22213 rtx_2func_t from_si_uns
;
22214 rtx_2func_t from_di_sign
;
22215 rtx_2func_t from_di_uns
;
22218 rtx_2func_t to_si_sign
;
22219 rtx_2func_t to_si_uns
;
22220 rtx_2func_t to_di_sign
;
22221 rtx_2func_t to_di_uns
;
22222 } hw_conversions
[2] = {
    /* conversions to/from KFmode */
22225 gen_extenddfkf2_hw
, /* KFmode <- DFmode. */
22226 gen_extendsfkf2_hw
, /* KFmode <- SFmode. */
22227 gen_float_kfsi2_hw
, /* KFmode <- SImode (signed). */
22228 gen_floatuns_kfsi2_hw
, /* KFmode <- SImode (unsigned). */
22229 gen_float_kfdi2_hw
, /* KFmode <- DImode (signed). */
22230 gen_floatuns_kfdi2_hw
, /* KFmode <- DImode (unsigned). */
22231 gen_trunckfdf2_hw
, /* DFmode <- KFmode. */
22232 gen_trunckfsf2_hw
, /* SFmode <- KFmode. */
22233 gen_fix_kfsi2_hw
, /* SImode <- KFmode (signed). */
22234 gen_fixuns_kfsi2_hw
, /* SImode <- KFmode (unsigned). */
22235 gen_fix_kfdi2_hw
, /* DImode <- KFmode (signed). */
22236 gen_fixuns_kfdi2_hw
, /* DImode <- KFmode (unsigned). */
    /* conversions to/from TFmode */
22241 gen_extenddftf2_hw
, /* TFmode <- DFmode. */
22242 gen_extendsftf2_hw
, /* TFmode <- SFmode. */
22243 gen_float_tfsi2_hw
, /* TFmode <- SImode (signed). */
22244 gen_floatuns_tfsi2_hw
, /* TFmode <- SImode (unsigned). */
22245 gen_float_tfdi2_hw
, /* TFmode <- DImode (signed). */
22246 gen_floatuns_tfdi2_hw
, /* TFmode <- DImode (unsigned). */
22247 gen_trunctfdf2_hw
, /* DFmode <- TFmode. */
22248 gen_trunctfsf2_hw
, /* SFmode <- TFmode. */
22249 gen_fix_tfsi2_hw
, /* SImode <- TFmode (signed). */
22250 gen_fixuns_tfsi2_hw
, /* SImode <- TFmode (unsigned). */
22251 gen_fix_tfdi2_hw
, /* DImode <- TFmode (signed). */
22252 gen_fixuns_tfdi2_hw
, /* DImode <- TFmode (unsigned). */
22256 if (dest_mode
== src_mode
)
22257 gcc_unreachable ();
22259 /* Eliminate memory operations. */
22261 src
= force_reg (src_mode
, src
);
22265 rtx tmp
= gen_reg_rtx (dest_mode
);
22266 rs6000_expand_float128_convert (tmp
, src
, unsigned_p
);
22267 rs6000_emit_move (dest
, tmp
, dest_mode
);
22271 /* Convert to IEEE 128-bit floating point. */
22272 if (FLOAT128_IEEE_P (dest_mode
))
22274 if (dest_mode
== KFmode
)
22276 else if (dest_mode
== TFmode
)
22279 gcc_unreachable ();
22285 hw_convert
= hw_conversions
[kf_or_tf
].from_df
;
22290 hw_convert
= hw_conversions
[kf_or_tf
].from_sf
;
22296 if (FLOAT128_IBM_P (src_mode
))
22305 cvt
= ufloat_optab
;
22306 hw_convert
= hw_conversions
[kf_or_tf
].from_si_uns
;
22310 cvt
= sfloat_optab
;
22311 hw_convert
= hw_conversions
[kf_or_tf
].from_si_sign
;
22318 cvt
= ufloat_optab
;
22319 hw_convert
= hw_conversions
[kf_or_tf
].from_di_uns
;
22323 cvt
= sfloat_optab
;
22324 hw_convert
= hw_conversions
[kf_or_tf
].from_di_sign
;
22329 gcc_unreachable ();
22333 /* Convert from IEEE 128-bit floating point. */
22334 else if (FLOAT128_IEEE_P (src_mode
))
22336 if (src_mode
== KFmode
)
22338 else if (src_mode
== TFmode
)
22341 gcc_unreachable ();
22347 hw_convert
= hw_conversions
[kf_or_tf
].to_df
;
22352 hw_convert
= hw_conversions
[kf_or_tf
].to_sf
;
22358 if (FLOAT128_IBM_P (dest_mode
))
22368 hw_convert
= hw_conversions
[kf_or_tf
].to_si_uns
;
22373 hw_convert
= hw_conversions
[kf_or_tf
].to_si_sign
;
22381 hw_convert
= hw_conversions
[kf_or_tf
].to_di_uns
;
22386 hw_convert
= hw_conversions
[kf_or_tf
].to_di_sign
;
22391 gcc_unreachable ();
22395 /* Both IBM format. */
22396 else if (FLOAT128_IBM_P (dest_mode
) && FLOAT128_IBM_P (src_mode
))
22400 gcc_unreachable ();
22402 /* Handle conversion between TFmode/KFmode. */
22404 emit_move_insn (dest
, gen_lowpart (dest_mode
, src
));
22406 /* Handle conversion if we have hardware support. */
22407 else if (TARGET_FLOAT128_HW
&& hw_convert
)
22408 emit_insn ((hw_convert
) (dest
, src
));
22410 /* Call an external function to do the conversion. */
22411 else if (cvt
!= unknown_optab
)
22413 libfunc
= convert_optab_libfunc (cvt
, dest_mode
, src_mode
);
22414 gcc_assert (libfunc
!= NULL_RTX
);
22416 dest2
= emit_library_call_value (libfunc
, dest
, LCT_CONST
, dest_mode
,
22419 gcc_assert (dest2
!= NULL_RTX
);
22420 if (!rtx_equal_p (dest
, dest2
))
22421 emit_move_insn (dest
, dest2
);
22425 gcc_unreachable ();
/* Emit the RTL for an sISEL pattern.  */

rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);

/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
  if (op2 == const0_rtx)

  if (GET_CODE (scratch) == SCRATCH)
    scratch = gen_reg_rtx (mode);

  if (logical_operand (op2, mode))
    emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
    emit_insn (gen_rtx_SET (scratch,
			    gen_rtx_PLUS (mode, op1,
					  negate_rtx (mode, op2))));
22461 rs6000_emit_sCOND (machine_mode mode
, rtx operands
[])
22464 machine_mode op_mode
;
22465 enum rtx_code cond_code
;
22466 rtx result
= operands
[0];
22468 condition_rtx
= rs6000_generate_compare (operands
[1], mode
);
22469 cond_code
= GET_CODE (condition_rtx
);
22471 if (cond_code
== NE
22472 || cond_code
== GE
|| cond_code
== LE
22473 || cond_code
== GEU
|| cond_code
== LEU
22474 || cond_code
== ORDERED
|| cond_code
== UNGE
|| cond_code
== UNLE
)
22476 rtx not_result
= gen_reg_rtx (CCEQmode
);
22477 rtx not_op
, rev_cond_rtx
;
22478 machine_mode cc_mode
;
22480 cc_mode
= GET_MODE (XEXP (condition_rtx
, 0));
22482 rev_cond_rtx
= gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode
, cond_code
),
22483 SImode
, XEXP (condition_rtx
, 0), const0_rtx
);
22484 not_op
= gen_rtx_COMPARE (CCEQmode
, rev_cond_rtx
, const0_rtx
);
22485 emit_insn (gen_rtx_SET (not_result
, not_op
));
22486 condition_rtx
= gen_rtx_EQ (VOIDmode
, not_result
, const0_rtx
);
22489 op_mode
= GET_MODE (XEXP (operands
[1], 0));
22490 if (op_mode
== VOIDmode
)
22491 op_mode
= GET_MODE (XEXP (operands
[1], 1));
22493 if (TARGET_POWERPC64
&& (op_mode
== DImode
|| FLOAT_MODE_P (mode
)))
22495 PUT_MODE (condition_rtx
, DImode
);
22496 convert_move (result
, condition_rtx
, 0);
22500 PUT_MODE (condition_rtx
, SImode
);
22501 emit_insn (gen_rtx_SET (result
, condition_rtx
));
/* Emit a branch of kind CODE to location LOC.  */

rs6000_emit_cbranch (machine_mode mode, rtx operands[])
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
22519 /* Return the string to output a conditional branch to LABEL, which is
22520 the operand template of the label, or NULL if the branch is really a
22521 conditional return.
22523 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22524 condition code register and its mode specifies what kind of
22525 comparison we made.
22527 REVERSED is nonzero if we should reverse the sense of the comparison.
22529 INSN is the insn. */
22532 output_cbranch (rtx op
, const char *label
, int reversed
, rtx_insn
*insn
)
22534 static char string
[64];
22535 enum rtx_code code
= GET_CODE (op
);
22536 rtx cc_reg
= XEXP (op
, 0);
22537 machine_mode mode
= GET_MODE (cc_reg
);
22538 int cc_regno
= REGNO (cc_reg
) - CR0_REGNO
;
22539 int need_longbranch
= label
!= NULL
&& get_attr_length (insn
) == 8;
22540 int really_reversed
= reversed
^ need_longbranch
;
22546 validate_condition_mode (code
, mode
);
22548 /* Work out which way this really branches. We could use
22549 reverse_condition_maybe_unordered here always but this
22550 makes the resulting assembler clearer. */
22551 if (really_reversed
)
22553 /* Reversal of FP compares takes care -- an ordered compare
22554 becomes an unordered compare and vice versa. */
22555 if (mode
== CCFPmode
)
22556 code
= reverse_condition_maybe_unordered (code
);
22558 code
= reverse_condition (code
);
22563 /* Not all of these are actually distinct opcodes, but
22564 we distinguish them for clarity of the resulting assembler. */
22565 case NE
: case LTGT
:
22566 ccode
= "ne"; break;
22567 case EQ
: case UNEQ
:
22568 ccode
= "eq"; break;
22570 ccode
= "ge"; break;
22571 case GT
: case GTU
: case UNGT
:
22572 ccode
= "gt"; break;
22574 ccode
= "le"; break;
22575 case LT
: case LTU
: case UNLT
:
22576 ccode
= "lt"; break;
22577 case UNORDERED
: ccode
= "un"; break;
22578 case ORDERED
: ccode
= "nu"; break;
22579 case UNGE
: ccode
= "nl"; break;
22580 case UNLE
: ccode
= "ng"; break;
22582 gcc_unreachable ();
22585 /* Maybe we have a guess as to how likely the branch is. */
22587 note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
);
22588 if (note
!= NULL_RTX
)
22590 /* PROB is the difference from 50%. */
22591 int prob
= profile_probability::from_reg_br_prob_note (XINT (note
, 0))
22592 .to_reg_br_prob_base () - REG_BR_PROB_BASE
/ 2;
22594 /* Only hint for highly probable/improbable branches on newer cpus when
22595 we have real profile data, as static prediction overrides processor
22596 dynamic prediction. For older cpus we may as well always hint, but
22597 assume not taken for branches that are very close to 50% as a
22598 mispredicted taken branch is more expensive than a
22599 mispredicted not-taken branch. */
22600 if (rs6000_always_hint
22601 || (abs (prob
) > REG_BR_PROB_BASE
/ 100 * 48
22602 && (profile_status_for_fn (cfun
) != PROFILE_GUESSED
)
22603 && br_prob_note_reliable_p (note
)))
22605 if (abs (prob
) > REG_BR_PROB_BASE
/ 20
22606 && ((prob
> 0) ^ need_longbranch
))
22614 s
+= sprintf (s
, "b%slr%s ", ccode
, pred
);
22616 s
+= sprintf (s
, "b%s%s ", ccode
, pred
);
22618 /* We need to escape any '%' characters in the reg_names string.
22619 Assume they'd only be the first character.... */
22620 if (reg_names
[cc_regno
+ CR0_REGNO
][0] == '%')
22622 s
+= sprintf (s
, "%s", reg_names
[cc_regno
+ CR0_REGNO
]);
22626 /* If the branch distance was too far, we may have to use an
22627 unconditional branch to go the distance. */
22628 if (need_longbranch
)
22629 s
+= sprintf (s
, ",$+8\n\tb %s", label
);
22631 s
+= sprintf (s
, ",%s", label
);
/* Return insn for VSX or Altivec comparisons.  */

rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
  machine_mode mode = GET_MODE (op0);

  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)

  mask = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22670 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22671 DMODE is expected destination mode. This is a recursive function. */
22674 rs6000_emit_vector_compare (enum rtx_code rcode
,
22676 machine_mode dmode
)
22679 bool swap_operands
= false;
22680 bool try_again
= false;
22682 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode
));
22683 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
22685 /* See if the comparison works as is. */
22686 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22694 swap_operands
= true;
22699 swap_operands
= true;
22707 /* Invert condition and try again.
22708 e.g., A != B becomes ~(A==B). */
22710 enum rtx_code rev_code
;
22711 enum insn_code nor_code
;
22714 rev_code
= reverse_condition_maybe_unordered (rcode
);
22715 if (rev_code
== UNKNOWN
)
22718 nor_code
= optab_handler (one_cmpl_optab
, dmode
);
22719 if (nor_code
== CODE_FOR_nothing
)
22722 mask2
= rs6000_emit_vector_compare (rev_code
, op0
, op1
, dmode
);
22726 mask
= gen_reg_rtx (dmode
);
22727 emit_insn (GEN_FCN (nor_code
) (mask
, mask2
));
22735 /* Try GT/GTU/LT/LTU OR EQ */
22738 enum insn_code ior_code
;
22739 enum rtx_code new_code
;
22760 gcc_unreachable ();
22763 ior_code
= optab_handler (ior_optab
, dmode
);
22764 if (ior_code
== CODE_FOR_nothing
)
22767 c_rtx
= rs6000_emit_vector_compare (new_code
, op0
, op1
, dmode
);
22771 eq_rtx
= rs6000_emit_vector_compare (EQ
, op0
, op1
, dmode
);
22775 mask
= gen_reg_rtx (dmode
);
22776 emit_insn (GEN_FCN (ior_code
) (mask
, c_rtx
, eq_rtx
));
22787 std::swap (op0
, op1
);
22789 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22794 /* You only get two chances. */
22798 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22799 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22800 operands for the relation operation COND. */
22803 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
22804 rtx cond
, rtx cc_op0
, rtx cc_op1
)
22806 machine_mode dest_mode
= GET_MODE (dest
);
22807 machine_mode mask_mode
= GET_MODE (cc_op0
);
22808 enum rtx_code rcode
= GET_CODE (cond
);
22809 machine_mode cc_mode
= CCmode
;
22812 bool invert_move
= false;
22814 if (VECTOR_UNIT_NONE_P (dest_mode
))
22817 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
22818 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
22822 /* Swap operands if we can, and fall back to doing the operation as
22823 specified, and doing a NOR to invert the test. */
22829 /* Invert condition and try again.
22830 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22831 invert_move
= true;
22832 rcode
= reverse_condition_maybe_unordered (rcode
);
22833 if (rcode
== UNKNOWN
)
22839 if (GET_MODE_CLASS (mask_mode
) == MODE_VECTOR_INT
)
22841 /* Invert condition to avoid compound test. */
22842 invert_move
= true;
22843 rcode
= reverse_condition (rcode
);
22851 /* Mark unsigned tests with CCUNSmode. */
22852 cc_mode
= CCUNSmode
;
22854 /* Invert condition to avoid compound test if necessary. */
22855 if (rcode
== GEU
|| rcode
== LEU
)
22857 invert_move
= true;
22858 rcode
= reverse_condition (rcode
);
22866 /* Get the vector mask for the given relational operations. */
22867 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
22873 std::swap (op_true
, op_false
);
22875 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22876 if (GET_MODE_CLASS (dest_mode
) == MODE_VECTOR_INT
22877 && (GET_CODE (op_true
) == CONST_VECTOR
22878 || GET_CODE (op_false
) == CONST_VECTOR
))
22880 rtx constant_0
= CONST0_RTX (dest_mode
);
22881 rtx constant_m1
= CONSTM1_RTX (dest_mode
);
22883 if (op_true
== constant_m1
&& op_false
== constant_0
)
22885 emit_move_insn (dest
, mask
);
22889 else if (op_true
== constant_0
&& op_false
== constant_m1
)
22891 emit_insn (gen_rtx_SET (dest
, gen_rtx_NOT (dest_mode
, mask
)));
22895 /* If we can't use the vector comparison directly, perhaps we can use
22896 the mask for the true or false fields, instead of loading up a
22898 if (op_true
== constant_m1
)
22901 if (op_false
== constant_0
)
22905 if (!REG_P (op_true
) && !SUBREG_P (op_true
))
22906 op_true
= force_reg (dest_mode
, op_true
);
22908 if (!REG_P (op_false
) && !SUBREG_P (op_false
))
22909 op_false
= force_reg (dest_mode
, op_false
);
22911 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
22912 CONST0_RTX (dest_mode
));
22913 emit_insn (gen_rtx_SET (dest
,
22914 gen_rtx_IF_THEN_ELSE (dest_mode
,
/* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
   for SF/DF scalars.  Move TRUE_COND to DEST if OP of the operands of the
   last comparison is nonzero/true, FALSE_COND if it is zero/false.  Return 0
   if the hardware has no such operation.  */

rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  bool max_p = false;

  if (result_mode != compare_mode)

  if (code == GE || code == GT)
  else if (code == LE || code == LT)

  if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
  else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))

  rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
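/* Clarifying comment: the two rtx_equal_p tests above recognize the
   (a op b ? a : b) and (a op b ? b : a) shapes of the conditional move;
   combined with whether OP is a GE/GT or LE/LT comparison they choose
   between SMAX and SMIN, so the whole select can be emitted as a single
   xsmaxcdp/xsmincdp instruction.  */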
22959 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22960 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22961 operands of the last comparison is nonzero/true, FALSE_COND if it is
22962 zero/false. Return 0 if the hardware has no such operation. */
22965 rs6000_emit_p9_fp_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22967 enum rtx_code code
= GET_CODE (op
);
22968 rtx op0
= XEXP (op
, 0);
22969 rtx op1
= XEXP (op
, 1);
22970 machine_mode result_mode
= GET_MODE (dest
);
22975 if (!can_create_pseudo_p ())
22988 code
= swap_condition (code
);
22989 std::swap (op0
, op1
);
22996 /* Generate: [(parallel [(set (dest)
22997 (if_then_else (op (cmp1) (cmp2))
23000 (clobber (scratch))])]. */
23002 compare_rtx
= gen_rtx_fmt_ee (code
, CCFPmode
, op0
, op1
);
23003 cmove_rtx
= gen_rtx_SET (dest
,
23004 gen_rtx_IF_THEN_ELSE (result_mode
,
23009 clobber_rtx
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (V2DImode
));
23010 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
23011 gen_rtvec (2, cmove_rtx
, clobber_rtx
)));
23016 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
23017 operands of the last comparison is nonzero/true, FALSE_COND if it
23018 is zero/false. Return 0 if the hardware has no such operation. */
23021 rs6000_emit_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
23023 enum rtx_code code
= GET_CODE (op
);
23024 rtx op0
= XEXP (op
, 0);
23025 rtx op1
= XEXP (op
, 1);
23026 machine_mode compare_mode
= GET_MODE (op0
);
23027 machine_mode result_mode
= GET_MODE (dest
);
23029 bool is_against_zero
;
23031 /* These modes should always match. */
23032 if (GET_MODE (op1
) != compare_mode
23033 /* In the isel case however, we can use a compare immediate, so
23034 op1 may be a small constant. */
23035 && (!TARGET_ISEL
|| !short_cint_operand (op1
, VOIDmode
)))
23037 if (GET_MODE (true_cond
) != result_mode
)
23039 if (GET_MODE (false_cond
) != result_mode
)
23042 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23043 if (TARGET_P9_MINMAX
23044 && (compare_mode
== SFmode
|| compare_mode
== DFmode
)
23045 && (result_mode
== SFmode
|| result_mode
== DFmode
))
23047 if (rs6000_emit_p9_fp_minmax (dest
, op
, true_cond
, false_cond
))
23050 if (rs6000_emit_p9_fp_cmove (dest
, op
, true_cond
, false_cond
))
23054 /* Don't allow using floating point comparisons for integer results for
23056 if (FLOAT_MODE_P (compare_mode
) && !FLOAT_MODE_P (result_mode
))
23059 /* First, work out if the hardware can do this at all, or
23060 if it's too slow.... */
23061 if (!FLOAT_MODE_P (compare_mode
))
23064 return rs6000_emit_int_cmove (dest
, op
, true_cond
, false_cond
);
23068 is_against_zero
= op1
== CONST0_RTX (compare_mode
);
23070 /* A floating-point subtract might overflow, underflow, or produce
23071 an inexact result, thus changing the floating-point flags, so it
23072 can't be generated if we care about that. It's safe if one side
23073 of the construct is zero, since then no subtract will be
23075 if (SCALAR_FLOAT_MODE_P (compare_mode
)
23076 && flag_trapping_math
&& ! is_against_zero
)
23079 /* Eliminate half of the comparisons by switching operands, this
23080 makes the remaining code simpler. */
23081 if (code
== UNLT
|| code
== UNGT
|| code
== UNORDERED
|| code
== NE
23082 || code
== LTGT
|| code
== LT
|| code
== UNLE
)
23084 code
= reverse_condition_maybe_unordered (code
);
23086 true_cond
= false_cond
;
23090 /* UNEQ and LTGT take four instructions for a comparison with zero,
23091 it'll probably be faster to use a branch here too. */
23092 if (code
== UNEQ
&& HONOR_NANS (compare_mode
))
23095 /* We're going to try to implement comparisons by performing
23096 a subtract, then comparing against zero. Unfortunately,
23097 Inf - Inf is NaN which is not zero, and so if we don't
23098 know that the operand is finite and the comparison
23099 would treat EQ different to UNORDERED, we can't do it. */
23100 if (HONOR_INFINITIES (compare_mode
)
23101 && code
!= GT
&& code
!= UNGE
23102 && (GET_CODE (op1
) != CONST_DOUBLE
23103 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1
)))
23104 /* Constructs of the form (a OP b ? a : b) are safe. */
23105 && ((! rtx_equal_p (op0
, false_cond
) && ! rtx_equal_p (op1
, false_cond
))
23106 || (! rtx_equal_p (op0
, true_cond
)
23107 && ! rtx_equal_p (op1
, true_cond
))))
23110 /* At this point we know we can use fsel. */
23112 /* Reduce the comparison to a comparison against zero. */
23113 if (! is_against_zero
)
23115 temp
= gen_reg_rtx (compare_mode
);
23116 emit_insn (gen_rtx_SET (temp
, gen_rtx_MINUS (compare_mode
, op0
, op1
)));
23118 op1
= CONST0_RTX (compare_mode
);
23121 /* If we don't care about NaNs we can reduce some of the comparisons
23122 down to faster ones. */
23123 if (! HONOR_NANS (compare_mode
))
23129 true_cond
= false_cond
;
23142 /* Now, reduce everything down to a GE. */
23149 temp
= gen_reg_rtx (compare_mode
);
23150 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23155 temp
= gen_reg_rtx (compare_mode
);
23156 emit_insn (gen_rtx_SET (temp
, gen_rtx_ABS (compare_mode
, op0
)));
23161 temp
= gen_reg_rtx (compare_mode
);
23162 emit_insn (gen_rtx_SET (temp
,
23163 gen_rtx_NEG (compare_mode
,
23164 gen_rtx_ABS (compare_mode
, op0
))));
23169 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23170 temp
= gen_reg_rtx (result_mode
);
23171 emit_insn (gen_rtx_SET (temp
,
23172 gen_rtx_IF_THEN_ELSE (result_mode
,
23173 gen_rtx_GE (VOIDmode
,
23175 true_cond
, false_cond
)));
23176 false_cond
= true_cond
;
23179 temp
= gen_reg_rtx (compare_mode
);
23180 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23185 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23186 temp
= gen_reg_rtx (result_mode
);
23187 emit_insn (gen_rtx_SET (temp
,
23188 gen_rtx_IF_THEN_ELSE (result_mode
,
23189 gen_rtx_GE (VOIDmode
,
23191 true_cond
, false_cond
)));
23192 true_cond
= false_cond
;
23195 temp
= gen_reg_rtx (compare_mode
);
23196 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23201 gcc_unreachable ();
23204 emit_insn (gen_rtx_SET (dest
,
23205 gen_rtx_IF_THEN_ELSE (result_mode
,
23206 gen_rtx_GE (VOIDmode
,
23208 true_cond
, false_cond
)));
23212 /* Same as above, but for ints (isel). */
23215 rs6000_emit_int_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
23217 rtx condition_rtx
, cr
;
23218 machine_mode mode
= GET_MODE (dest
);
23219 enum rtx_code cond_code
;
23220 rtx (*isel_func
) (rtx
, rtx
, rtx
, rtx
, rtx
);
23223 if (mode
!= SImode
&& (!TARGET_POWERPC64
|| mode
!= DImode
))
23226 /* We still have to do the compare, because isel doesn't do a
23227 compare, it just looks at the CRx bits set by a previous compare
23229 condition_rtx
= rs6000_generate_compare (op
, mode
);
23230 cond_code
= GET_CODE (condition_rtx
);
23231 cr
= XEXP (condition_rtx
, 0);
23232 signedp
= GET_MODE (cr
) == CCmode
;
23234 isel_func
= (mode
== SImode
23235 ? (signedp
? gen_isel_signed_si
: gen_isel_unsigned_si
)
23236 : (signedp
? gen_isel_signed_di
: gen_isel_unsigned_di
));
23240 case LT
: case GT
: case LTU
: case GTU
: case EQ
:
23241 /* isel handles these directly. */
23245 /* We need to swap the sense of the comparison. */
23247 std::swap (false_cond
, true_cond
);
23248 PUT_CODE (condition_rtx
, reverse_condition (cond_code
));
23253 false_cond
= force_reg (mode
, false_cond
);
23254 if (true_cond
!= const0_rtx
)
23255 true_cond
= force_reg (mode
, true_cond
);
23257 emit_insn (isel_func (dest
, condition_rtx
, true_cond
, false_cond
, cr
));
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
  machine_mode mode = GET_MODE (op0);

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));

  if (code == SMAX || code == SMIN)

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
/* Split a signbit operation on 64-bit machines with direct move.  Also allow
   for the value to come from memory or if it is already loaded into a GPR.  */

rs6000_split_signbit (rtx dest, rtx src)
  machine_mode d_mode = GET_MODE (dest);
  machine_mode s_mode = GET_MODE (src);
  rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
  rtx shift_reg = dest_di;

  gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);

      rtx mem = (WORDS_BIG_ENDIAN
		 ? adjust_address (src, DImode, 0)
		 : adjust_address (src, DImode, 8));
      emit_insn (gen_rtx_SET (dest_di, mem));

      unsigned int r = reg_or_subregno (src);

      if (INT_REGNO_P (r))
	shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));

	  /* Generate the special mfvsrd instruction to get it in a GPR.  */
	  gcc_assert (VSX_REGNO_P (r));
	  if (s_mode == KFmode)
	    emit_insn (gen_signbitkf2_dm2 (dest_di, src));
	    emit_insn (gen_signbittf2_dm2 (dest_di, src));

  emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

emit_unlikely_jump (rtx cond, rtx label)
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());

/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

emit_load_locked (machine_mode mode, rtx reg, rtx mem)
  rtx (*fn) (rtx, rtx) = NULL;

      fn = gen_load_lockedqi;
      fn = gen_load_lockedhi;
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      fn = gen_load_lockedsi;
      fn = gen_load_lockeddi;
      fn = gen_load_lockedti;
      gcc_unreachable ();

  emit_insn (fn (reg, mem));

/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
  rtx (*fn) (rtx, rtx, rtx) = NULL;

      fn = gen_store_conditionalqi;
      fn = gen_store_conditionalhi;
      fn = gen_store_conditionalsi;
      fn = gen_store_conditionaldi;
      fn = gen_store_conditionalti;
      gcc_unreachable ();

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
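/* Clarifying comment: the gen_load_locked* / gen_store_conditional* patterns
   selected above expand to the matching larx/stcx. pair for each width
   (e.g. lwarx/stwcx. for SImode, ldarx/stdcx. for DImode); the mode switches
   only pick the pattern, the retry loop itself is built by the callers.  */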
/* Expand barriers before and after a load_locked/store_cond sequence.  */

rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);

    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      gcc_unreachable ();

rs6000_post_atomic_barrier (enum memmodel model)
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      gcc_unreachable ();
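/* Clarifying comment: in the pre-barrier, relaxed/consume/acquire orders need
   no leading fence, release and acq_rel use lwsync, and seq_cst uses the full
   hwsync; the post-barrier emits isync for the orders that must keep later
   accesses from moving above the atomic sequence.  */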
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
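  /* Worked example (descriptive only): for a QImode access at an address
     whose low two bits are 0b11 on a little-endian target, ALIGN clears
     those bits, SHIFT becomes (3 << 3) & 0x18 = 24 and MASK becomes
     0xff << 24, i.e. the byte lives in the top lane of the aligned SImode
     word; on big-endian the XOR with 0x18 flips SHIFT back to 0.  */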
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23549 /* Expand an atomic compare and swap operation. */
23552 rs6000_expand_atomic_compare_and_swap (rtx operands
[])
23554 rtx boolval
, retval
, mem
, oldval
, newval
, cond
;
23555 rtx label1
, label2
, x
, mask
, shift
;
23556 machine_mode mode
, orig_mode
;
23557 enum memmodel mod_s
, mod_f
;
23560 boolval
= operands
[0];
23561 retval
= operands
[1];
23563 oldval
= operands
[3];
23564 newval
= operands
[4];
23565 is_weak
= (INTVAL (operands
[5]) != 0);
23566 mod_s
= memmodel_base (INTVAL (operands
[6]));
23567 mod_f
= memmodel_base (INTVAL (operands
[7]));
23568 orig_mode
= mode
= GET_MODE (mem
);
23570 mask
= shift
= NULL_RTX
;
23571 if (mode
== QImode
|| mode
== HImode
)
23573 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23574 lwarx and shift/mask operations. With power8, we need to do the
23575 comparison in SImode, but the store is still done in QI/HImode. */
23576 oldval
= convert_modes (SImode
, mode
, oldval
, 1);
23578 if (!TARGET_SYNC_HI_QI
)
23580 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23582 /* Shift and mask OLDVAL into position with the word. */
23583 oldval
= expand_simple_binop (SImode
, ASHIFT
, oldval
, shift
,
23584 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23586 /* Shift and mask NEWVAL into position within the word. */
23587 newval
= convert_modes (SImode
, mode
, newval
, 1);
23588 newval
= expand_simple_binop (SImode
, ASHIFT
, newval
, shift
,
23589 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23592 /* Prepare to adjust the return value. */
23593 retval
= gen_reg_rtx (SImode
);
23596 else if (reg_overlap_mentioned_p (retval
, oldval
))
23597 oldval
= copy_to_reg (oldval
);
23599 if (mode
!= TImode
&& !reg_or_short_operand (oldval
, mode
))
23600 oldval
= copy_to_mode_reg (mode
, oldval
);
23602 if (reg_overlap_mentioned_p (retval
, newval
))
23603 newval
= copy_to_reg (newval
);
23605 mem
= rs6000_pre_atomic_barrier (mem
, mod_s
);
23610 label1
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23611 emit_label (XEXP (label1
, 0));
23613 label2
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23615 emit_load_locked (mode
, retval
, mem
);
23619 x
= expand_simple_binop (SImode
, AND
, retval
, mask
,
23620 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23622 cond
= gen_reg_rtx (CCmode
);
23623 /* If we have TImode, synthesize a comparison. */
23624 if (mode
!= TImode
)
23625 x
= gen_rtx_COMPARE (CCmode
, x
, oldval
);
23628 rtx xor1_result
= gen_reg_rtx (DImode
);
23629 rtx xor2_result
= gen_reg_rtx (DImode
);
23630 rtx or_result
= gen_reg_rtx (DImode
);
23631 rtx new_word0
= simplify_gen_subreg (DImode
, x
, TImode
, 0);
23632 rtx new_word1
= simplify_gen_subreg (DImode
, x
, TImode
, 8);
23633 rtx old_word0
= simplify_gen_subreg (DImode
, oldval
, TImode
, 0);
23634 rtx old_word1
= simplify_gen_subreg (DImode
, oldval
, TImode
, 8);
23636 emit_insn (gen_xordi3 (xor1_result
, new_word0
, old_word0
));
23637 emit_insn (gen_xordi3 (xor2_result
, new_word1
, old_word1
));
23638 emit_insn (gen_iordi3 (or_result
, xor1_result
, xor2_result
));
23639 x
= gen_rtx_COMPARE (CCmode
, or_result
, const0_rtx
);
23642 emit_insn (gen_rtx_SET (cond
, x
));
23644 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23645 emit_unlikely_jump (x
, label2
);
23649 x
= rs6000_mask_atomic_subword (retval
, newval
, mask
);
23651 emit_store_conditional (orig_mode
, cond
, mem
, x
);
23655 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23656 emit_unlikely_jump (x
, label1
);
23659 if (!is_mm_relaxed (mod_f
))
23660 emit_label (XEXP (label2
, 0));
23662 rs6000_post_atomic_barrier (mod_s
);
23664 if (is_mm_relaxed (mod_f
))
23665 emit_label (XEXP (label2
, 0));
23668 rs6000_finish_atomic_subword (operands
[1], retval
, shift
);
23669 else if (mode
!= GET_MODE (operands
[1]))
23670 convert_move (operands
[1], retval
, 1);
23672 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23673 x
= gen_rtx_EQ (SImode
, cond
, const0_rtx
);
23674 emit_insn (gen_rtx_SET (boolval
, x
));
23677 /* Expand an atomic exchange operation. */
23680 rs6000_expand_atomic_exchange (rtx operands
[])
23682 rtx retval
, mem
, val
, cond
;
23684 enum memmodel model
;
23685 rtx label
, x
, mask
, shift
;
23687 retval
= operands
[0];
23690 model
= memmodel_base (INTVAL (operands
[3]));
23691 mode
= GET_MODE (mem
);
23693 mask
= shift
= NULL_RTX
;
23694 if (!TARGET_SYNC_HI_QI
&& (mode
== QImode
|| mode
== HImode
))
23696 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23698 /* Shift and mask VAL into position with the word. */
23699 val
= convert_modes (SImode
, mode
, val
, 1);
23700 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23701 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23703 /* Prepare to adjust the return value. */
23704 retval
= gen_reg_rtx (SImode
);
23708 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23710 label
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23711 emit_label (XEXP (label
, 0));
23713 emit_load_locked (mode
, retval
, mem
);
23717 x
= rs6000_mask_atomic_subword (retval
, val
, mask
);
23719 cond
= gen_reg_rtx (CCmode
);
23720 emit_store_conditional (mode
, cond
, mem
, x
);
23722 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23723 emit_unlikely_jump (x
, label
);
23725 rs6000_post_atomic_barrier (model
);
23728 rs6000_finish_atomic_subword (operands
[0], retval
, shift
);
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
                         rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = memmodel_base (INTVAL (model_rtx));
  machine_mode mode = GET_MODE (mem);
  machine_mode store_mode = mode;
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  /* On power8, we want to use SImode for the operation.  On previous systems,
     use the operation in a subword and shift/mask to get the proper byte or
     halfword.  */
  if (mode == QImode || mode == HImode)
    {
      if (TARGET_SYNC_HI_QI)
        {
          val = convert_modes (SImode, mode, val, 1);

          /* Prepare to adjust the return value.  */
          before = gen_reg_rtx (SImode);
          if (after)
            after = gen_reg_rtx (SImode);
          mode = SImode;
        }
      else
        {
          mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

          /* Shift and mask VAL into position with the word.  */
          val = convert_modes (SImode, mode, val, 1);
          val = expand_simple_binop (SImode, ASHIFT, val, shift,
                                     NULL_RTX, 1, OPTAB_LIB_WIDEN);

          switch (code)
            {
            case IOR:
            case XOR:
              /* We've already zero-extended VAL.  That is sufficient to
                 make certain that it does not affect other bits.  */
              break;

            case AND:
              /* If we make certain that all of the other bits in VAL are
                 set, that will be sufficient to not affect other bits.  */
              x = gen_rtx_NOT (SImode, mask);
              x = gen_rtx_IOR (SImode, x, val);
              emit_insn (gen_rtx_SET (val, x));
              break;

            case NOT:
            case PLUS:
            case MINUS:
              /* These will all affect bits outside the field and need
                 adjustment via MASK within the loop.  */
              break;

            default:
              gcc_unreachable ();
            }

          /* Prepare to adjust the return value.  */
          before = gen_reg_rtx (SImode);
          if (after)
            after = gen_reg_rtx (SImode);
          store_mode = mode = SImode;
        }
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
                                 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }
  else if (store_mode != mode)
    x = convert_modes (store_mode, mode, x, 1);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (store_mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
         then do the calculations in a SImode register.  */
      if (orig_before)
        rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
        rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (store_mode != mode)
    {
      /* QImode/HImode on machines with lbarx/lharx where we do the native
         operation and then do the calculations in a SImode register.  */
      if (orig_before)
        convert_move (orig_before, before, 1);
      if (orig_after)
        convert_move (orig_after, after, 1);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
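/* As a rough illustration, a SImode __atomic_fetch_add with a sequentially
   consistent model reaches rs6000_expand_atomic_op with CODE == PLUS and
   expands to approximately

	sync			# pre-barrier
   .L1:	lwarx   r9,0,r3		# BEFORE value
	addi    r10,r9,1	# AFTER value
	stwcx.  r10,0,r3
	bne-    .L1
	isync			# post-barrier

   The register numbers and the exact barrier instructions are illustrative
   only; the real choices are driven by the mode and memory model above.  */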
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs (reg, mode);
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
        ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
        {
          if (REG_P (src) && FP_REGNO_P (REGNO (src)))
            p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
          else
            p_src = simplify_gen_subreg (reg_mode, src, mode,
                                         i * reg_mode_size);

          if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
            p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
          else
            p_dst = simplify_gen_subreg (reg_mode, dst, mode,
                                         i * reg_mode_size);

          emit_insn (gen_rtx_SET (p_dst, p_src));
        }

      return;
    }
  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
         overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
        emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
                                                     i * reg_mode_size),
                                simplify_gen_subreg (reg_mode, src, mode,
                                                     i * reg_mode_size)));
    }
23953 bool used_update
= false;
23954 rtx restore_basereg
= NULL_RTX
;
23956 if (MEM_P (src
) && INT_REGNO_P (reg
))
23960 if (GET_CODE (XEXP (src
, 0)) == PRE_INC
23961 || GET_CODE (XEXP (src
, 0)) == PRE_DEC
)
23964 breg
= XEXP (XEXP (src
, 0), 0);
23965 delta_rtx
= (GET_CODE (XEXP (src
, 0)) == PRE_INC
23966 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src
)))
23967 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src
))));
23968 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23969 src
= replace_equiv_address (src
, breg
);
23971 else if (! rs6000_offsettable_memref_p (src
, reg_mode
))
23973 if (GET_CODE (XEXP (src
, 0)) == PRE_MODIFY
)
23975 rtx basereg
= XEXP (XEXP (src
, 0), 0);
23978 rtx ndst
= simplify_gen_subreg (reg_mode
, dst
, mode
, 0);
23979 emit_insn (gen_rtx_SET (ndst
,
23980 gen_rtx_MEM (reg_mode
,
23982 used_update
= true;
23985 emit_insn (gen_rtx_SET (basereg
,
23986 XEXP (XEXP (src
, 0), 1)));
23987 src
= replace_equiv_address (src
, basereg
);
23991 rtx basereg
= gen_rtx_REG (Pmode
, reg
);
23992 emit_insn (gen_rtx_SET (basereg
, XEXP (src
, 0)));
23993 src
= replace_equiv_address (src
, basereg
);
23997 breg
= XEXP (src
, 0);
23998 if (GET_CODE (breg
) == PLUS
|| GET_CODE (breg
) == LO_SUM
)
23999 breg
= XEXP (breg
, 0);
24001 /* If the base register we are using to address memory is
24002 also a destination reg, then change that register last. */
24004 && REGNO (breg
) >= REGNO (dst
)
24005 && REGNO (breg
) < REGNO (dst
) + nregs
)
24006 j
= REGNO (breg
) - REGNO (dst
);
24008 else if (MEM_P (dst
) && INT_REGNO_P (reg
))
24012 if (GET_CODE (XEXP (dst
, 0)) == PRE_INC
24013 || GET_CODE (XEXP (dst
, 0)) == PRE_DEC
)
24016 breg
= XEXP (XEXP (dst
, 0), 0);
24017 delta_rtx
= (GET_CODE (XEXP (dst
, 0)) == PRE_INC
24018 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst
)))
24019 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst
))));
24021 /* We have to update the breg before doing the store.
24022 Use store with update, if available. */
24026 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
24027 emit_insn (TARGET_32BIT
24028 ? (TARGET_POWERPC64
24029 ? gen_movdi_si_update (breg
, breg
, delta_rtx
, nsrc
)
24030 : gen_movsi_update (breg
, breg
, delta_rtx
, nsrc
))
24031 : gen_movdi_di_update (breg
, breg
, delta_rtx
, nsrc
));
24032 used_update
= true;
24035 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
24036 dst
= replace_equiv_address (dst
, breg
);
24038 else if (!rs6000_offsettable_memref_p (dst
, reg_mode
)
24039 && GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
24041 if (GET_CODE (XEXP (dst
, 0)) == PRE_MODIFY
)
24043 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
24046 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
24047 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode
,
24050 used_update
= true;
24053 emit_insn (gen_rtx_SET (basereg
,
24054 XEXP (XEXP (dst
, 0), 1)));
24055 dst
= replace_equiv_address (dst
, basereg
);
24059 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
24060 rtx offsetreg
= XEXP (XEXP (dst
, 0), 1);
24061 gcc_assert (GET_CODE (XEXP (dst
, 0)) == PLUS
24063 && REG_P (offsetreg
)
24064 && REGNO (basereg
) != REGNO (offsetreg
));
24065 if (REGNO (basereg
) == 0)
24067 rtx tmp
= offsetreg
;
24068 offsetreg
= basereg
;
24071 emit_insn (gen_add3_insn (basereg
, basereg
, offsetreg
));
24072 restore_basereg
= gen_sub3_insn (basereg
, basereg
, offsetreg
);
24073 dst
= replace_equiv_address (dst
, basereg
);
24076 else if (GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
24077 gcc_assert (rs6000_offsettable_memref_p (dst
, reg_mode
));
24080 for (i
= 0; i
< nregs
; i
++)
24082 /* Calculate index to next subword. */
24087 /* If compiler already emitted move of first word by
24088 store with update, no need to do anything. */
24089 if (j
== 0 && used_update
)
24092 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode
, dst
, mode
,
24093 j
* reg_mode_size
),
24094 simplify_gen_subreg (reg_mode
, src
, mode
,
24095 j
* reg_mode_size
)));
24097 if (restore_basereg
!= NULL_RTX
)
24098 emit_insn (restore_basereg
);
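/* As a rough illustration, a TImode register-to-register move on a 64-bit
   target is split by the code above into two word_mode moves, e.g.

	(set (reg:DI 5) (reg:DI 9))
	(set (reg:DI 6) (reg:DI 10))

   and the iteration order (or the J index) is chosen so that a register
   that also serves as the memory base, or overlaps the destination, is
   never clobbered before it has been read.  The register numbers shown
   are illustrative only.  */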
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

/* Determine whether the REG is really used.  */

static bool
save_reg_p (int reg)
{
  /* We need to mark the PIC offset register live for the same conditions
     as it is set up, or otherwise it won't be saved before we clobber it.  */

  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
    {
      /* When calling eh_return, we must return true for all the cases
         where conditional_register_usage marks the PIC offset reg
         call used or fixed.  */
      if (TARGET_TOC && TARGET_MINIMAL_TOC
          && (crtl->calls_eh_return
              || df_regs_ever_live_p (reg)
              || !constant_pool_empty_p ()))
        return true;

      if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
          && flag_pic)
        return true;
    }

  return !call_used_regs[reg] && df_regs_ever_live_p (reg);
}
/* Return the first fixed-point register that is required to be
   saved. 32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}

/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}

/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
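/* As a rough illustration, assuming the usual rs6000.h definition of
   ALTIVEC_REG_BIT (v0 maps to the most significant bit of the 32-bit
   word), a function whose only live vector registers are v20 and v21
   would start from a mask of 0x00000c00 before the argument and
   return-value registers are removed again above.  */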
24240 /* For a very restricted set of circumstances, we can cut down the
24241 size of prologues/epilogues by calling our own save/restore-the-world
24245 compute_save_world_info (rs6000_stack_t
*info
)
24247 info
->world_save_p
= 1;
24249 = (WORLD_SAVE_P (info
)
24250 && DEFAULT_ABI
== ABI_DARWIN
24251 && !cfun
->has_nonlocal_label
24252 && info
->first_fp_reg_save
== FIRST_SAVED_FP_REGNO
24253 && info
->first_gp_reg_save
== FIRST_SAVED_GP_REGNO
24254 && info
->first_altivec_reg_save
== FIRST_SAVED_ALTIVEC_REGNO
24255 && info
->cr_save_p
);
24257 /* This will not work in conjunction with sibcalls. Make sure there
24258 are none. (This check is expensive, but seldom executed.) */
24259 if (WORLD_SAVE_P (info
))
24262 for (insn
= get_last_insn_anywhere (); insn
; insn
= PREV_INSN (insn
))
24263 if (CALL_P (insn
) && SIBLING_CALL_P (insn
))
24265 info
->world_save_p
= 0;
24270 if (WORLD_SAVE_P (info
))
24272 /* Even if we're not touching VRsave, make sure there's room on the
24273 stack for it, if it looks like we're calling SAVE_WORLD, which
24274 will attempt to save it. */
24275 info
->vrsave_size
= 4;
24277 /* If we are going to save the world, we need to save the link register too. */
24278 info
->lr_save_p
= 1;
24280 /* "Save" the VRsave register too if we're saving the world. */
24281 if (info
->vrsave_mask
== 0)
24282 info
->vrsave_mask
= compute_vrsave_mask ();
24284 /* Because the Darwin register save/restore routines only handle
24285 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24287 gcc_assert (info
->first_fp_reg_save
>= FIRST_SAVED_FP_REGNO
24288 && (info
->first_altivec_reg_save
24289 >= FIRST_SAVED_ALTIVEC_REGNO
));
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}


/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */

static bool
fixed_reg_p (int reg)
{
  /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
     backend sets it, overriding anything the user might have given.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic)
          || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
          || (TARGET_TOC && TARGET_MINIMAL_TOC)))
    return false;

  return fixed_regs[reg];
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};
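/* As a rough illustration, the strategy is a plain bitmask; a 32-bit
   function that keeps its FPR and VR saves inline but uses stmw/lmw for
   the GPRs might end up with something like

     strategy = SAVE_INLINE_FPRS | REST_INLINE_FPRS
                | SAVE_INLINE_VRS | REST_INLINE_VRS
                | SAVE_INLINE_GPRS | SAVE_MULTIPLE
                | REST_INLINE_GPRS | REST_MULTIPLE;

   This particular combination is only an example; rs6000_savres_strategy
   below computes the real value from the ABI and the registers used.  */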
24342 rs6000_savres_strategy (rs6000_stack_t
*info
,
24343 bool using_static_chain_p
)
24347 /* Select between in-line and out-of-line save and restore of regs.
24348 First, all the obvious cases where we don't use out-of-line. */
24349 if (crtl
->calls_eh_return
24350 || cfun
->machine
->ra_need_lr
)
24351 strategy
|= (SAVE_INLINE_FPRS
| REST_INLINE_FPRS
24352 | SAVE_INLINE_GPRS
| REST_INLINE_GPRS
24353 | SAVE_INLINE_VRS
| REST_INLINE_VRS
);
24355 if (info
->first_gp_reg_save
== 32)
24356 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24358 if (info
->first_fp_reg_save
== 64
24359 /* The out-of-line FP routines use double-precision stores;
24360 we can't use those routines if we don't have such stores. */
24361 || (TARGET_HARD_FLOAT
&& !TARGET_DOUBLE_FLOAT
))
24362 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24364 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
+ 1)
24365 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24367 /* Define cutoff for using out-of-line functions to save registers. */
24368 if (DEFAULT_ABI
== ABI_V4
|| TARGET_ELF
)
24370 if (!optimize_size
)
24372 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24373 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24374 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24378 /* Prefer out-of-line restore if it will exit. */
24379 if (info
->first_fp_reg_save
> 61)
24380 strategy
|= SAVE_INLINE_FPRS
;
24381 if (info
->first_gp_reg_save
> 29)
24383 if (info
->first_fp_reg_save
== 64)
24384 strategy
|= SAVE_INLINE_GPRS
;
24386 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24388 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
)
24389 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24392 else if (DEFAULT_ABI
== ABI_DARWIN
)
24394 if (info
->first_fp_reg_save
> 60)
24395 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24396 if (info
->first_gp_reg_save
> 29)
24397 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24398 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24402 gcc_checking_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
24403 if ((flag_shrink_wrap_separate
&& optimize_function_for_speed_p (cfun
))
24404 || info
->first_fp_reg_save
> 61)
24405 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24406 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24407 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24410 /* Don't bother to try to save things out-of-line if r11 is occupied
24411 by the static chain. It would require too much fiddling and the
24412 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24413 pointer on Darwin, and AIX uses r1 or r12. */
24414 if (using_static_chain_p
24415 && (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
24416 strategy
|= ((DEFAULT_ABI
== ABI_DARWIN
? 0 : SAVE_INLINE_FPRS
)
24418 | SAVE_INLINE_VRS
);
24420 /* Don't ever restore fixed regs. That means we can't use the
24421 out-of-line register restore functions if a fixed reg is in the
24422 range of regs restored. */
24423 if (!(strategy
& REST_INLINE_FPRS
))
24424 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24427 strategy
|= REST_INLINE_FPRS
;
24431 /* We can only use the out-of-line routines to restore fprs if we've
24432 saved all the registers from first_fp_reg_save in the prologue.
24433 Otherwise, we risk loading garbage. Of course, if we have saved
24434 out-of-line then we know we haven't skipped any fprs. */
24435 if ((strategy
& SAVE_INLINE_FPRS
)
24436 && !(strategy
& REST_INLINE_FPRS
))
24437 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24438 if (!save_reg_p (i
))
24440 strategy
|= REST_INLINE_FPRS
;
24444 /* Similarly, for altivec regs. */
24445 if (!(strategy
& REST_INLINE_VRS
))
24446 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24449 strategy
|= REST_INLINE_VRS
;
24453 if ((strategy
& SAVE_INLINE_VRS
)
24454 && !(strategy
& REST_INLINE_VRS
))
24455 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24456 if (!save_reg_p (i
))
24458 strategy
|= REST_INLINE_VRS
;
24462 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24463 saved is an out-of-line save or restore. Set up the value for
24464 the next test (excluding out-of-line gprs). */
24465 bool lr_save_p
= (info
->lr_save_p
24466 || !(strategy
& SAVE_INLINE_FPRS
)
24467 || !(strategy
& SAVE_INLINE_VRS
)
24468 || !(strategy
& REST_INLINE_FPRS
)
24469 || !(strategy
& REST_INLINE_VRS
));
24471 if (TARGET_MULTIPLE
24472 && !TARGET_POWERPC64
24473 && info
->first_gp_reg_save
< 31
24474 && !(flag_shrink_wrap
24475 && flag_shrink_wrap_separate
24476 && optimize_function_for_speed_p (cfun
)))
24479 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24480 if (save_reg_p (i
))
24484 /* Don't use store multiple if only one reg needs to be
24485 saved. This can occur for example when the ABI_V4 pic reg
24486 (r30) needs to be saved to make calls, but r31 is not
24488 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24491 /* Prefer store multiple for saves over out-of-line
24492 routines, since the store-multiple instruction will
24493 always be smaller. */
24494 strategy
|= SAVE_INLINE_GPRS
| SAVE_MULTIPLE
;
          /* The situation is more complicated with load multiple.
             We'd prefer to use the out-of-line routines for restores,
             since the "exit" out-of-line routines can handle the
             restore of LR and the frame teardown.  However it doesn't
             make sense to use the out-of-line routine if that is the
             only reason we'd need to save LR, and we can't use the
             "exit" out-of-line gpr restore if we have saved some
             fprs; in those cases it is advantageous to use load
             multiple when available.  */
24505 if (info
->first_fp_reg_save
!= 64 || !lr_save_p
)
24506 strategy
|= REST_INLINE_GPRS
| REST_MULTIPLE
;
24510 /* Using the "exit" out-of-line routine does not improve code size
24511 if using it would require lr to be saved and if only saving one
24513 else if (!lr_save_p
&& info
->first_gp_reg_save
> 29)
24514 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24516 /* Don't ever restore fixed regs. */
24517 if ((strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24518 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24519 if (fixed_reg_p (i
))
24521 strategy
|= REST_INLINE_GPRS
;
24522 strategy
&= ~REST_MULTIPLE
;
24526 /* We can only use load multiple or the out-of-line routines to
24527 restore gprs if we've saved all the registers from
24528 first_gp_reg_save. Otherwise, we risk loading garbage.
24529 Of course, if we have saved out-of-line or used stmw then we know
24530 we haven't skipped any gprs. */
24531 if ((strategy
& (SAVE_INLINE_GPRS
| SAVE_MULTIPLE
)) == SAVE_INLINE_GPRS
24532 && (strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24533 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24534 if (!save_reg_p (i
))
24536 strategy
|= REST_INLINE_GPRS
;
24537 strategy
&= ~REST_MULTIPLE
;
24541 if (TARGET_ELF
&& TARGET_64BIT
)
24543 if (!(strategy
& SAVE_INLINE_FPRS
))
24544 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24545 else if (!(strategy
& SAVE_INLINE_GPRS
)
24546 && info
->first_fp_reg_save
== 64)
24547 strategy
|= SAVE_NOINLINE_GPRS_SAVES_LR
;
24549 else if (TARGET_AIX
&& !(strategy
& REST_INLINE_FPRS
))
24550 strategy
|= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
;
24552 if (TARGET_MACHO
&& !(strategy
& SAVE_INLINE_FPRS
))
24553 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
                                                          32-bit  64-bit
   SP----> +---------------------------------------+
           | back chain to caller                  | 0       0
           +---------------------------------------+
           | saved CR                              | 4       8 (8-11)
           +---------------------------------------+
           | saved LR                              | 8       16
           +---------------------------------------+
           | reserved for compilers                | 12      24
           +---------------------------------------+
           | reserved for binders                  | 16      32
           +---------------------------------------+
           | saved TOC pointer                     | 20      40
           +---------------------------------------+
           | Parameter save area (+padding*) (P)   | 24      48
           +---------------------------------------+
           | Alloca space (A)                      | 24+P    etc.
           +---------------------------------------+
           | Local variable space (L)              | 24+P+A
           +---------------------------------------+
           | Float/int conversion temporary (X)    | 24+P+A+L
           +---------------------------------------+
           | Save area for AltiVec registers (W)   | 24+P+A+L+X
           +---------------------------------------+
           | AltiVec alignment padding (Y)         | 24+P+A+L+X+W
           +---------------------------------------+
           | Save area for VRSAVE register (Z)     | 24+P+A+L+X+W+Y
           +---------------------------------------+
           | Save area for GP registers (G)        | 24+P+A+X+L+X+W+Y+Z
           +---------------------------------------+
           | Save area for FP registers (F)        | 24+P+A+X+L+X+W+Y+Z+G
           +---------------------------------------+
   old SP->| back chain to caller's caller         |
           +---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   The ELFv2 ABI is a variant of the AIX ABI.  Stack frames look like:

   SP----> +---------------------------------------+
           | Back chain to caller                  |  0
           +---------------------------------------+
           | Save area for CR                      |  8
           +---------------------------------------+
           | Saved LR                              |  16
           +---------------------------------------+
           | Saved TOC pointer                     |  24
           +---------------------------------------+
           | Parameter save area (+padding*) (P)   |  32
           +---------------------------------------+
           | Alloca space (A)                      |  32+P
           +---------------------------------------+
           | Local variable space (L)              |  32+P+A
           +---------------------------------------+
           | Save area for AltiVec registers (W)   |  32+P+A+L
           +---------------------------------------+
           | AltiVec alignment padding (Y)         |  32+P+A+L+W
           +---------------------------------------+
           | Save area for GP registers (G)        |  32+P+A+L+W+Y
           +---------------------------------------+
           | Save area for FP registers (F)        |  32+P+A+L+W+Y+G
           +---------------------------------------+
   old SP->| back chain to caller's caller         |  32+P+A+L+W+Y+G+F
           +---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   V.4 stack frames look like:

   SP----> +---------------------------------------+
           | back chain to caller                  | 0
           +---------------------------------------+
           | caller's saved LR                     | 4
           +---------------------------------------+
           | Parameter save area (+padding*) (P)   | 8
           +---------------------------------------+
           | Alloca space (A)                      | 8+P
           +---------------------------------------+
           | Varargs save area (V)                 | 8+P+A
           +---------------------------------------+
           | Local variable space (L)              | 8+P+A+V
           +---------------------------------------+
           | Float/int conversion temporary (X)    | 8+P+A+V+L
           +---------------------------------------+
           | Save area for AltiVec registers (W)   | 8+P+A+V+L+X
           +---------------------------------------+
           | AltiVec alignment padding (Y)         | 8+P+A+V+L+X+W
           +---------------------------------------+
           | Save area for VRSAVE register (Z)     | 8+P+A+V+L+X+W+Y
           +---------------------------------------+
           | saved CR (C)                          | 8+P+A+V+L+X+W+Y+Z
           +---------------------------------------+
           | Save area for GP registers (G)        | 8+P+A+V+L+X+W+Y+Z+C
           +---------------------------------------+
           | Save area for FP registers (F)        | 8+P+A+V+L+X+W+Y+Z+C+G
           +---------------------------------------+
   old SP->| back chain to caller's caller         |
           +---------------------------------------+

     * If the alloca area is present and the required alignment is
       16 bytes, the parameter save area is padded so that the
       alloca area starts 16-byte aligned.

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
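/* As a worked illustration of the ELFv2 layout above: a function with no
   alloca area (A = 0), a 64-byte parameter save area (P = 64), 48 bytes
   of locals (L = 48) and a couple of FPRs to save puts the FPR save area
   immediately below the caller's frame (old SP), the GPR save area below
   the AltiVec areas, and rounds the final frame size up to the 16-byte
   ABI_STACK_BOUNDARY.  The sizes used here are made up; the real offsets
   are computed field by field in rs6000_stack_info below.  */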
24688 static rs6000_stack_t
*
24689 rs6000_stack_info (void)
24691 /* We should never be called for thunks, we are not set up for that. */
24692 gcc_assert (!cfun
->is_thunk
);
24694 rs6000_stack_t
*info
= &stack_info
;
24695 int reg_size
= TARGET_32BIT
? 4 : 8;
24700 HOST_WIDE_INT non_fixed_size
;
24701 bool using_static_chain_p
;
24703 if (reload_completed
&& info
->reload_completed
)
24706 memset (info
, 0, sizeof (*info
));
24707 info
->reload_completed
= reload_completed
;
24709 /* Select which calling sequence. */
24710 info
->abi
= DEFAULT_ABI
;
24712 /* Calculate which registers need to be saved & save area size. */
24713 info
->first_gp_reg_save
= first_reg_to_save ();
24714 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24715 even if it currently looks like we won't. Reload may need it to
24716 get at a constant; if so, it will have already created a constant
24717 pool entry for it. */
24718 if (((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
24719 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
24720 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
24721 && crtl
->uses_const_pool
24722 && info
->first_gp_reg_save
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
24723 first_gp
= RS6000_PIC_OFFSET_TABLE_REGNUM
;
24725 first_gp
= info
->first_gp_reg_save
;
24727 info
->gp_size
= reg_size
* (32 - first_gp
);
24729 info
->first_fp_reg_save
= first_fp_reg_to_save ();
24730 info
->fp_size
= 8 * (64 - info
->first_fp_reg_save
);
24732 info
->first_altivec_reg_save
= first_altivec_reg_to_save ();
24733 info
->altivec_size
= 16 * (LAST_ALTIVEC_REGNO
+ 1
24734 - info
->first_altivec_reg_save
);
24736 /* Does this function call anything? */
24737 info
->calls_p
= (!crtl
->is_leaf
|| cfun
->machine
->ra_needs_full_frame
);
24739 /* Determine if we need to save the condition code registers. */
24740 if (save_reg_p (CR2_REGNO
)
24741 || save_reg_p (CR3_REGNO
)
24742 || save_reg_p (CR4_REGNO
))
24744 info
->cr_save_p
= 1;
24745 if (DEFAULT_ABI
== ABI_V4
)
24746 info
->cr_size
= reg_size
;
24749 /* If the current function calls __builtin_eh_return, then we need
24750 to allocate stack space for registers that will hold data for
24751 the exception handler. */
24752 if (crtl
->calls_eh_return
)
24755 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; ++i
)
24758 ehrd_size
= i
* UNITS_PER_WORD
;
24763 /* In the ELFv2 ABI, we also need to allocate space for separate
24764 CR field save areas if the function calls __builtin_eh_return. */
24765 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
24767 /* This hard-codes that we have three call-saved CR fields. */
24768 ehcr_size
= 3 * reg_size
;
24769 /* We do *not* use the regular CR save mechanism. */
24770 info
->cr_save_p
= 0;
24775 /* Determine various sizes. */
24776 info
->reg_size
= reg_size
;
24777 info
->fixed_size
= RS6000_SAVE_AREA
;
24778 info
->vars_size
= RS6000_ALIGN (get_frame_size (), 8);
24779 if (cfun
->calls_alloca
)
24781 RS6000_ALIGN (crtl
->outgoing_args_size
+ info
->fixed_size
,
24782 STACK_BOUNDARY
/ BITS_PER_UNIT
) - info
->fixed_size
;
24784 info
->parm_size
= RS6000_ALIGN (crtl
->outgoing_args_size
,
24785 TARGET_ALTIVEC
? 16 : 8);
24786 if (FRAME_GROWS_DOWNWARD
)
24788 += RS6000_ALIGN (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
,
24789 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
)
24790 - (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
);
24792 if (TARGET_ALTIVEC_ABI
)
24793 info
->vrsave_mask
= compute_vrsave_mask ();
24795 if (TARGET_ALTIVEC_VRSAVE
&& info
->vrsave_mask
)
24796 info
->vrsave_size
= 4;
24798 compute_save_world_info (info
);
24800 /* Calculate the offsets. */
24801 switch (DEFAULT_ABI
)
24805 gcc_unreachable ();
24810 info
->fp_save_offset
= -info
->fp_size
;
24811 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24813 if (TARGET_ALTIVEC_ABI
)
24815 info
->vrsave_save_offset
= info
->gp_save_offset
- info
->vrsave_size
;
24817 /* Align stack so vector save area is on a quadword boundary.
24818 The padding goes above the vectors. */
24819 if (info
->altivec_size
!= 0)
24820 info
->altivec_padding_size
= info
->vrsave_save_offset
& 0xF;
24822 info
->altivec_save_offset
= info
->vrsave_save_offset
24823 - info
->altivec_padding_size
24824 - info
->altivec_size
;
24825 gcc_assert (info
->altivec_size
== 0
24826 || info
->altivec_save_offset
% 16 == 0);
24828 /* Adjust for AltiVec case. */
24829 info
->ehrd_offset
= info
->altivec_save_offset
- ehrd_size
;
24832 info
->ehrd_offset
= info
->gp_save_offset
- ehrd_size
;
24834 info
->ehcr_offset
= info
->ehrd_offset
- ehcr_size
;
24835 info
->cr_save_offset
= reg_size
; /* first word when 64-bit. */
24836 info
->lr_save_offset
= 2*reg_size
;
24840 info
->fp_save_offset
= -info
->fp_size
;
24841 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24842 info
->cr_save_offset
= info
->gp_save_offset
- info
->cr_size
;
24844 if (TARGET_ALTIVEC_ABI
)
24846 info
->vrsave_save_offset
= info
->cr_save_offset
- info
->vrsave_size
;
24848 /* Align stack so vector save area is on a quadword boundary. */
24849 if (info
->altivec_size
!= 0)
24850 info
->altivec_padding_size
= 16 - (-info
->vrsave_save_offset
% 16);
24852 info
->altivec_save_offset
= info
->vrsave_save_offset
24853 - info
->altivec_padding_size
24854 - info
->altivec_size
;
24856 /* Adjust for AltiVec case. */
24857 info
->ehrd_offset
= info
->altivec_save_offset
;
24860 info
->ehrd_offset
= info
->cr_save_offset
;
24862 info
->ehrd_offset
-= ehrd_size
;
24863 info
->lr_save_offset
= reg_size
;
24866 save_align
= (TARGET_ALTIVEC_ABI
|| DEFAULT_ABI
== ABI_DARWIN
) ? 16 : 8;
24867 info
->save_size
= RS6000_ALIGN (info
->fp_size
24869 + info
->altivec_size
24870 + info
->altivec_padding_size
24874 + info
->vrsave_size
,
24877 non_fixed_size
= info
->vars_size
+ info
->parm_size
+ info
->save_size
;
24879 info
->total_size
= RS6000_ALIGN (non_fixed_size
+ info
->fixed_size
,
24880 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
);
24882 /* Determine if we need to save the link register. */
24884 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
24886 && !TARGET_PROFILE_KERNEL
)
24887 || (DEFAULT_ABI
== ABI_V4
&& cfun
->calls_alloca
)
24888 #ifdef TARGET_RELOCATABLE
24889 || (DEFAULT_ABI
== ABI_V4
24890 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
24891 && !constant_pool_empty_p ())
24893 || rs6000_ra_ever_killed ())
24894 info
->lr_save_p
= 1;
24896 using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
24897 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
24898 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
24899 info
->savres_strategy
= rs6000_savres_strategy (info
, using_static_chain_p
);
24901 if (!(info
->savres_strategy
& SAVE_INLINE_GPRS
)
24902 || !(info
->savres_strategy
& SAVE_INLINE_FPRS
)
24903 || !(info
->savres_strategy
& SAVE_INLINE_VRS
)
24904 || !(info
->savres_strategy
& REST_INLINE_GPRS
)
24905 || !(info
->savres_strategy
& REST_INLINE_FPRS
)
24906 || !(info
->savres_strategy
& REST_INLINE_VRS
))
24907 info
->lr_save_p
= 1;
24909 if (info
->lr_save_p
)
24910 df_set_regs_ever_live (LR_REGNO
, true);
24912 /* Determine if we need to allocate any stack frame:
24914 For AIX we need to push the stack if a frame pointer is needed
24915 (because the stack might be dynamically adjusted), if we are
24916 debugging, if we make calls, or if the sum of fp_save, gp_save,
24917 and local variables are more than the space needed to save all
24918 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24919 + 18*8 = 288 (GPR13 reserved).
24921 For V.4 we don't have the stack cushion that AIX uses, but assume
24922 that the debugger can handle stackless frames. */
24927 else if (DEFAULT_ABI
== ABI_V4
)
24928 info
->push_p
= non_fixed_size
!= 0;
24930 else if (frame_pointer_needed
)
24933 else if (TARGET_XCOFF
&& write_symbols
!= NO_DEBUG
)
24937 info
->push_p
= non_fixed_size
> (TARGET_32BIT
? 220 : 288);
24943 debug_stack_info (rs6000_stack_t
*info
)
24945 const char *abi_string
;
24948 info
= rs6000_stack_info ();
24950 fprintf (stderr
, "\nStack information for function %s:\n",
24951 ((current_function_decl
&& DECL_NAME (current_function_decl
))
24952 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl
))
24957 default: abi_string
= "Unknown"; break;
24958 case ABI_NONE
: abi_string
= "NONE"; break;
24959 case ABI_AIX
: abi_string
= "AIX"; break;
24960 case ABI_ELFv2
: abi_string
= "ELFv2"; break;
24961 case ABI_DARWIN
: abi_string
= "Darwin"; break;
24962 case ABI_V4
: abi_string
= "V.4"; break;
24965 fprintf (stderr
, "\tABI = %5s\n", abi_string
);
24967 if (TARGET_ALTIVEC_ABI
)
24968 fprintf (stderr
, "\tALTIVEC ABI extensions enabled.\n");
24970 if (info
->first_gp_reg_save
!= 32)
24971 fprintf (stderr
, "\tfirst_gp_reg_save = %5d\n", info
->first_gp_reg_save
);
24973 if (info
->first_fp_reg_save
!= 64)
24974 fprintf (stderr
, "\tfirst_fp_reg_save = %5d\n", info
->first_fp_reg_save
);
24976 if (info
->first_altivec_reg_save
<= LAST_ALTIVEC_REGNO
)
24977 fprintf (stderr
, "\tfirst_altivec_reg_save = %5d\n",
24978 info
->first_altivec_reg_save
);
24980 if (info
->lr_save_p
)
24981 fprintf (stderr
, "\tlr_save_p = %5d\n", info
->lr_save_p
);
24983 if (info
->cr_save_p
)
24984 fprintf (stderr
, "\tcr_save_p = %5d\n", info
->cr_save_p
);
24986 if (info
->vrsave_mask
)
24987 fprintf (stderr
, "\tvrsave_mask = 0x%x\n", info
->vrsave_mask
);
24990 fprintf (stderr
, "\tpush_p = %5d\n", info
->push_p
);
24993 fprintf (stderr
, "\tcalls_p = %5d\n", info
->calls_p
);
24996 fprintf (stderr
, "\tgp_save_offset = %5d\n", info
->gp_save_offset
);
24999 fprintf (stderr
, "\tfp_save_offset = %5d\n", info
->fp_save_offset
);
25001 if (info
->altivec_size
)
25002 fprintf (stderr
, "\taltivec_save_offset = %5d\n",
25003 info
->altivec_save_offset
);
25005 if (info
->vrsave_size
)
25006 fprintf (stderr
, "\tvrsave_save_offset = %5d\n",
25007 info
->vrsave_save_offset
);
25009 if (info
->lr_save_p
)
25010 fprintf (stderr
, "\tlr_save_offset = %5d\n", info
->lr_save_offset
);
25012 if (info
->cr_save_p
)
25013 fprintf (stderr
, "\tcr_save_offset = %5d\n", info
->cr_save_offset
);
25015 if (info
->varargs_save_offset
)
25016 fprintf (stderr
, "\tvarargs_save_offset = %5d\n", info
->varargs_save_offset
);
25018 if (info
->total_size
)
25019 fprintf (stderr
, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
25022 if (info
->vars_size
)
25023 fprintf (stderr
, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
25026 if (info
->parm_size
)
25027 fprintf (stderr
, "\tparm_size = %5d\n", info
->parm_size
);
25029 if (info
->fixed_size
)
25030 fprintf (stderr
, "\tfixed_size = %5d\n", info
->fixed_size
);
25033 fprintf (stderr
, "\tgp_size = %5d\n", info
->gp_size
);
25036 fprintf (stderr
, "\tfp_size = %5d\n", info
->fp_size
);
25038 if (info
->altivec_size
)
25039 fprintf (stderr
, "\taltivec_size = %5d\n", info
->altivec_size
);
25041 if (info
->vrsave_size
)
25042 fprintf (stderr
, "\tvrsave_size = %5d\n", info
->vrsave_size
);
25044 if (info
->altivec_padding_size
)
25045 fprintf (stderr
, "\taltivec_padding_size= %5d\n",
25046 info
->altivec_padding_size
);
25049 fprintf (stderr
, "\tcr_size = %5d\n", info
->cr_size
);
25051 if (info
->save_size
)
25052 fprintf (stderr
, "\tsave_size = %5d\n", info
->save_size
);
25054 if (info
->reg_size
!= 4)
25055 fprintf (stderr
, "\treg_size = %5d\n", info
->reg_size
);
25057 fprintf (stderr
, "\tsave-strategy = %04x\n", info
->savres_strategy
);
25059 fprintf (stderr
, "\n");
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
     is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      if (count == 0)
        /* FRAME is set to frame_pointer_rtx by the generic code, but that
           is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD.  */
        frame = stack_pointer_rtx;
      rtx prev_frame_addr = memory_address (Pmode, frame);
      rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
      rtx lr_save_off = plus_constant (Pmode,
                                       prev_frame, RETURN_ADDRESS_OFFSET);
      rtx lr_save_addr = memory_address (Pmode, lr_save_off);
      return gen_rtx_MEM (Pmode, lr_save_addr);
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
25091 rs6000_function_ok_for_sibcall (tree decl
, tree exp
)
25096 fntype
= TREE_TYPE (decl
);
25098 fntype
= TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp
)));
25100 /* We can't do it if the called function has more vector parameters
25101 than the current function; there's nowhere to put the VRsave code. */
25102 if (TARGET_ALTIVEC_ABI
25103 && TARGET_ALTIVEC_VRSAVE
25104 && !(decl
&& decl
== current_function_decl
))
25106 function_args_iterator args_iter
;
25110 /* Functions with vector parameters are required to have a
25111 prototype, so the argument type info must be available
25113 FOREACH_FUNCTION_ARGS(fntype
, type
, args_iter
)
25114 if (TREE_CODE (type
) == VECTOR_TYPE
25115 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25118 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl
), type
, args_iter
)
25119 if (TREE_CODE (type
) == VECTOR_TYPE
25120 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25127 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25128 functions, because the callee may have a different TOC pointer to
25129 the caller and there's no way to ensure we restore the TOC when
25130 we return. With the secure-plt SYSV ABI we can't make non-local
25131 calls when -fpic/PIC because the plt call stubs use r30. */
25132 if (DEFAULT_ABI
== ABI_DARWIN
25133 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25135 && !DECL_EXTERNAL (decl
)
25136 && !DECL_WEAK (decl
)
25137 && (*targetm
.binds_local_p
) (decl
))
25138 || (DEFAULT_ABI
== ABI_V4
25139 && (!TARGET_SECURE_PLT
25142 && (*targetm
.binds_local_p
) (decl
)))))
25144 tree attr_list
= TYPE_ATTRIBUTES (fntype
);
25146 if (!lookup_attribute ("longcall", attr_list
)
25147 || lookup_attribute ("shortcall", attr_list
))
static bool
rs6000_ra_ever_killed (void)
{
  rtx_insn *top;
  rtx reg;
  rtx_insn *insn;

  if (cfun->is_thunk)
    return false;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

        move LR->R0
        bcl to set PIC register
        move LR->R31
        move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          if (CALL_P (insn))
            {
              if (!SIBLING_CALL_P (insn))
                return true;
            }
          else if (find_regno_note (insn, REG_INC, LR_REGNO))
            return true;
          else if (set_of (reg, insn) != NULL_RTX
                   && !prologue_epilogue_contains (insn))
            return true;
        }
    }
  return false;
}
25207 /* Emit instructions needed to load the TOC register.
25208 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25209 a constant pool; or for SVR4 -fpic. */
25212 rs6000_emit_load_toc_table (int fromprolog
)
25215 dest
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
25217 if (TARGET_ELF
&& TARGET_SECURE_PLT
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
)
25220 rtx lab
, tmp1
, tmp2
, got
;
25222 lab
= gen_label_rtx ();
25223 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (lab
));
25224 lab
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25227 got
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25231 got
= rs6000_got_sym ();
25232 tmp1
= tmp2
= dest
;
25235 tmp1
= gen_reg_rtx (Pmode
);
25236 tmp2
= gen_reg_rtx (Pmode
);
25238 emit_insn (gen_load_toc_v4_PIC_1 (lab
));
25239 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
25240 emit_insn (gen_load_toc_v4_PIC_3b (tmp2
, tmp1
, got
, lab
));
25241 emit_insn (gen_load_toc_v4_PIC_3c (dest
, tmp2
, got
, lab
));
25243 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
25245 emit_insn (gen_load_toc_v4_pic_si ());
25246 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25248 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
25251 rtx temp0
= (fromprolog
25252 ? gen_rtx_REG (Pmode
, 0)
25253 : gen_reg_rtx (Pmode
));
25259 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
25260 symF
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25262 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
25263 symL
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25265 emit_insn (gen_load_toc_v4_PIC_1 (symF
));
25266 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25267 emit_insn (gen_load_toc_v4_PIC_2 (temp0
, dest
, symL
, symF
));
25273 tocsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25275 lab
= gen_label_rtx ();
25276 emit_insn (gen_load_toc_v4_PIC_1b (tocsym
, lab
));
25277 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25278 if (TARGET_LINK_STACK
)
25279 emit_insn (gen_addsi3 (dest
, dest
, GEN_INT (4)));
25280 emit_move_insn (temp0
, gen_rtx_MEM (Pmode
, dest
));
25282 emit_insn (gen_addsi3 (dest
, temp0
, dest
));
25284 else if (TARGET_ELF
&& !TARGET_AIX
&& flag_pic
== 0 && TARGET_MINIMAL_TOC
)
25286 /* This is for AIX code running in non-PIC ELF32. */
25287 rtx realsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25290 emit_insn (gen_elf_high (dest
, realsym
));
25291 emit_insn (gen_elf_low (dest
, dest
, realsym
));
25295 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
25298 emit_insn (gen_load_toc_aix_si (dest
));
25300 emit_insn (gen_load_toc_aix_di (dest
));
25304 /* Emit instructions to restore the link register after determining where
25305 its value has been stored. */
25308 rs6000_emit_eh_reg_restore (rtx source
, rtx scratch
)
25310 rs6000_stack_t
*info
= rs6000_stack_info ();
25313 operands
[0] = source
;
25314 operands
[1] = scratch
;
25316 if (info
->lr_save_p
)
25318 rtx frame_rtx
= stack_pointer_rtx
;
25319 HOST_WIDE_INT sp_offset
= 0;
25322 if (frame_pointer_needed
25323 || cfun
->calls_alloca
25324 || info
->total_size
> 32767)
25326 tmp
= gen_frame_mem (Pmode
, frame_rtx
);
25327 emit_move_insn (operands
[1], tmp
);
25328 frame_rtx
= operands
[1];
25330 else if (info
->push_p
)
25331 sp_offset
= info
->total_size
;
25333 tmp
= plus_constant (Pmode
, frame_rtx
,
25334 info
->lr_save_offset
+ sp_offset
);
25335 tmp
= gen_frame_mem (Pmode
, tmp
);
25336 emit_move_insn (tmp
, operands
[0]);
25339 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNO
), operands
[0]);
25341 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25342 state of lr_save_p so any change from here on would be a bug. In
25343 particular, stop rs6000_ra_ever_killed from considering the SET
25344 of lr we may have added just above. */
25345 cfun
->machine
->lr_save_state
= info
->lr_save_p
+ 1;
25348 static GTY(()) alias_set_type set
= -1;
25351 get_TOC_alias_set (void)
25354 set
= new_alias_set ();
25358 /* This returns nonzero if the current function uses the TOC. This is
25359 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25360 is generated by the ABI_V4 load_toc_* patterns.
25361 Return 2 instead of 1 if the load_toc_* pattern is in the function
25362 partition that doesn't start the function. */
25370 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
25374 rtx pat
= PATTERN (insn
);
25377 if (GET_CODE (pat
) == PARALLEL
)
25378 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
25380 rtx sub
= XVECEXP (pat
, 0, i
);
25381 if (GET_CODE (sub
) == USE
)
25383 sub
= XEXP (sub
, 0);
25384 if (GET_CODE (sub
) == UNSPEC
25385 && XINT (sub
, 1) == UNSPEC_TOC
)
25390 else if (crtl
->has_bb_partition
25392 && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
25400 create_TOC_reference (rtx symbol
, rtx largetoc_reg
)
25402 rtx tocrel
, tocreg
, hi
;
25404 if (TARGET_DEBUG_ADDR
)
25406 if (GET_CODE (symbol
) == SYMBOL_REF
)
25407 fprintf (stderr
, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25411 fprintf (stderr
, "\ncreate_TOC_reference, code %s:\n",
25412 GET_RTX_NAME (GET_CODE (symbol
)));
25413 debug_rtx (symbol
);
25417 if (!can_create_pseudo_p ())
25418 df_set_regs_ever_live (TOC_REGISTER
, true);
25420 tocreg
= gen_rtx_REG (Pmode
, TOC_REGISTER
);
25421 tocrel
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, symbol
, tocreg
), UNSPEC_TOCREL
);
25422 if (TARGET_CMODEL
== CMODEL_SMALL
|| can_create_pseudo_p ())
25425 hi
= gen_rtx_HIGH (Pmode
, copy_rtx (tocrel
));
25426 if (largetoc_reg
!= NULL
)
25428 emit_move_insn (largetoc_reg
, hi
);
25431 return gen_rtx_LO_SUM (Pmode
, hi
, tocrel
);
25434 /* Issue assembly directives that create a reference to the given DWARF
25435 FRAME_TABLE_LABEL from the current function section. */
25437 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label
)
25439 fprintf (asm_out_file
, "\t.ref %s\n",
25440 (* targetm
.strip_name_encoding
) (frame_table_label
));
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
        || (hard_frame_needed
            && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
25472 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25473 and set the appropriate attributes for the generated insn. Return the
25474 first insn which adjusts the stack pointer or the last insn before
25475 the stack adjustment loop.
25477 SIZE_INT is used to create the CFI note for the allocation.
25479 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25480 since stacks grow to lower addresses its runtime value is -SIZE_INT.
25482 ORIG_SP contains the backchain value that must be stored at *sp. */
25485 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int
, rtx orig_sp
)
25489 rtx size_rtx
= GEN_INT (-size_int
);
25490 if (size_int
> 32767)
25492 rtx tmp_reg
= gen_rtx_REG (Pmode
, 0);
25493 /* Need a note here so that try_split doesn't get confused. */
25494 if (get_last_insn () == NULL_RTX
)
25495 emit_note (NOTE_INSN_DELETED
);
25496 insn
= emit_move_insn (tmp_reg
, size_rtx
);
25497 try_split (PATTERN (insn
), insn
, 0);
25498 size_rtx
= tmp_reg
;
25501 if (Pmode
== SImode
)
25502 insn
= emit_insn (gen_movsi_update_stack (stack_pointer_rtx
,
25507 insn
= emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx
,
25511 rtx par
= PATTERN (insn
);
25512 gcc_assert (GET_CODE (par
) == PARALLEL
);
25513 rtx set
= XVECEXP (par
, 0, 0);
25514 gcc_assert (GET_CODE (set
) == SET
);
25515 rtx mem
= SET_DEST (set
);
25516 gcc_assert (MEM_P (mem
));
25517 MEM_NOTRAP_P (mem
) = 1;
25518 set_mem_alias_set (mem
, get_frame_alias_set ());
25520 RTX_FRAME_RELATED_P (insn
) = 1;
25521 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
25522 gen_rtx_SET (stack_pointer_rtx
,
25523 gen_rtx_PLUS (Pmode
,
25525 GEN_INT (-size_int
))));
25527 /* Emit a blockage to ensure the allocation/probing insns are
25528 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25529 note for similar reasons. */
25530 if (flag_stack_clash_protection
)
25532 add_reg_note (insn
, REG_STACK_CHECK
, const0_rtx
);
25533 emit_insn (gen_blockage ());
static HOST_WIDE_INT
get_stack_clash_protection_probe_interval (void)
{
  return (HOST_WIDE_INT_1U
          << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
}

static HOST_WIDE_INT
get_stack_clash_protection_guard_size (void)
{
  return (HOST_WIDE_INT_1U
          << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
}
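/* As a rough illustration, both of the values above are powers of two
   taken from --param settings; e.g. a stack-clash-protection-probe-interval
   parameter of 12 gives HOST_WIDE_INT_1U << 12 == 4096, i.e. one probe
   per 4 KiB of allocated stack.  */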
25553 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25554 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25556 COPY_REG, if non-null, should contain a copy of the original
25557 stack pointer at exit from this function.
25559 This is subtly different than the Ada probing in that it tries hard to
25560 prevent attacks that jump the stack guard. Thus it is never allowed to
25561 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25562 space without a suitable probe. */
25564 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size
,
25567 rtx orig_sp
= copy_reg
;
25569 HOST_WIDE_INT probe_interval
= get_stack_clash_protection_probe_interval ();
25571 /* Round the size down to a multiple of PROBE_INTERVAL. */
25572 HOST_WIDE_INT rounded_size
= ROUND_DOWN (orig_size
, probe_interval
);
  /* If explicitly requested,
     or the rounded size is not the same as the original size,
     or the rounded size is greater than a page,
     then we will need a copy of the original stack pointer.  */
25578 if (rounded_size
!= orig_size
25579 || rounded_size
> probe_interval
25582 /* If the caller did not request a copy of the incoming stack
25583 pointer, then we use r0 to hold the copy. */
25585 orig_sp
= gen_rtx_REG (Pmode
, 0);
25586 emit_move_insn (orig_sp
, stack_pointer_rtx
);
  /* There are three cases here.

     One is a single probe, which is the most common and most efficiently
     implemented as it does not have to have a copy of the original
     stack pointer if there are no residuals.

     Second is unrolled allocation/probes, which we use if there are just
     a few of them.  It needs to save the original stack pointer into a
     temporary for use as a source register in the allocation/probe.

     Last is a loop.  This is the most uncommon case and least efficient.  */
25600 rtx_insn
*retval
= NULL
;
25601 if (rounded_size
== probe_interval
)
25603 retval
= rs6000_emit_allocate_stack_1 (probe_interval
, stack_pointer_rtx
);
25605 dump_stack_clash_frame_info (PROBE_INLINE
, rounded_size
!= orig_size
);
25607 else if (rounded_size
<= 8 * probe_interval
)
25609 /* The ABI requires using the store with update insns to allocate
25610 space and store the backchain into the stack
25612 So we save the current stack pointer into a temporary, then
25613 emit the store-with-update insns to store the saved stack pointer
25614 into the right location in each new page. */
25615 for (int i
= 0; i
< rounded_size
; i
+= probe_interval
)
25618 = rs6000_emit_allocate_stack_1 (probe_interval
, orig_sp
);
25620 /* Save the first stack adjustment in RETVAL. */
25625 dump_stack_clash_frame_info (PROBE_INLINE
, rounded_size
!= orig_size
);
25629 /* Compute the ending address. */
25631 = copy_reg
? gen_rtx_REG (Pmode
, 0) : gen_rtx_REG (Pmode
, 12);
25632 rtx rs
= GEN_INT (-rounded_size
);
25634 if (add_operand (rs
, Pmode
))
25635 insn
= emit_insn (gen_add3_insn (end_addr
, stack_pointer_rtx
, rs
));
25638 emit_move_insn (end_addr
, GEN_INT (-rounded_size
));
25639 insn
= emit_insn (gen_add3_insn (end_addr
, end_addr
,
25640 stack_pointer_rtx
));
25641 /* Describe the effect of INSN to the CFI engine. */
25642 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
25643 gen_rtx_SET (end_addr
,
25644 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
25647 RTX_FRAME_RELATED_P (insn
) = 1;
      /* Emit the loop.  */
      if (TARGET_64BIT)
	retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
						     stack_pointer_rtx,
						     orig_sp, end_addr));
      else
	retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
						     stack_pointer_rtx,
						     orig_sp, end_addr));
      RTX_FRAME_RELATED_P (retval) = 1;
      /* Describe the effect of INSN to the CFI engine.  */
      add_reg_note (retval, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (stack_pointer_rtx, end_addr));

      /* Emit a blockage to ensure the allocation/probing insns are
	 not optimized, combined, removed, etc.  Other cases handle this
	 within their call to rs6000_emit_allocate_stack_1.  */
      emit_insn (gen_blockage ());

      dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
    }
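
  /* Whichever case was taken above, at most one residual allocation of
     less than probe_interval bytes (orig_size - rounded_size) can remain,
     and the store-with-update used below to allocate it doubles as the
     final probe.  */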
  if (orig_size != rounded_size)
    {
      /* Allocate (and implicitly probe) any residual space.  */
      HOST_WIDE_INT residual = orig_size - rounded_size;

      rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);

      /* If the residual was the only allocation, then we can return the
	 allocating insn.  */
      if (!retval)
	retval = insn;
    }

  return retval;
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static rtx_insn *
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return 0;
    }
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  rtx_insn *insn
	    = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
	  gcc_assert (insn);
	  emit_insn (insn);
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4
	       && !flag_pic)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }
  if (flag_stack_clash_protection)
    {
      if (size < get_stack_clash_protection_guard_size ())
	dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
      else
	{
	  rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
								      copy_reg);

	  /* If we asked for a copy with an offset, then we still need to
	     add in the offset.  */
	  if (copy_reg && copy_off)
	    emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
	  return insn;
	}
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  rtx_insn *insn = rs6000_emit_allocate_stack_1 (size, stack_reg);

  return insn;
}
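
/* A typical caller is rs6000_emit_prologue later in this file, roughly
   (sketch only; the real call site passes values computed from the frame
   layout):

     sp_adjust = rs6000_emit_allocate_stack (info->total_size,
					     ptr_reg, ptr_off);

   where PTR_REG, if nonnull, receives a copy of the incoming stack
   pointer biased by PTR_OFF for use by out-of-line save routines.  */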
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }
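
  /* Illustrative example (numbers chosen for exposition): with FIRST ==
     16384, SIZE == 12288 and PROBE_INTERVAL == 4096, the loop above probes
     at sp-20480 and sp-24576, and the final probe lands at sp-28672,
     i.e. at -(FIRST + SIZE).  */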
  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
						  -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
	{
	  emit_move_insn (r0, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
	}
      else
	emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
						   -rounded_size)));
      /* Step 3: the loop

	 do
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }
	 while (TEST_ADDR != LAST_ADDR)

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
	emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
      else
	emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   addresses, not offsets.  */

static const char *
output_probe_stack_range_1 (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  /* Branch.  */
  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
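
/* With the default 4 KiB PROBE_INTERVAL and the r12/r0 operands passed by
   rs6000_emit_probe_stack_range above, the emitted loop looks roughly like
   (illustrative only):

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpd 0,12,0
		bne 0,.LPSRL0  */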
/* This function is called when rs6000_frame_related is processing
   SETs within a PARALLEL, and returns whether the REGNO save ought to
   be marked RTX_FRAME_RELATED_P.  The PARALLELs involved are those
   for out-of-line register save functions, store multiple, and the
   Darwin world_save.  They may contain registers that don't really
   need saving.  */

static bool
interesting_frame_related_regno (unsigned int regno)
{
  /* Saves apparently of r0 are actually saving LR.  It doesn't make
     sense to substitute the regno here to test save_reg_p (LR_REGNO).
     We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
     (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
     as frame related.  */
  if (regno == 0)
    return true;

  /* If we see CR2 then we are here on a Darwin world save.  Saves of
     CR2 signify the whole CR is being saved.  This is a long-standing
     ABI wart fixed by ELFv2.  As for r0/lr there is no need to check
     that CR needs to be saved.  */
  if (regno == CR2_REGNO)
    return true;

  /* Omit frame info for any user-defined global regs.  If frame info
     is supplied for them, frame unwinding will restore a user reg.
     Also omit frame info for any reg we don't need to save, as that
     bloats frame info and can cause problems with shrink wrapping.
     Since global regs won't be seen as needing to be saved, both of
     these conditions are covered by save_reg_p.  */
  return save_reg_p (regno);
}
/* Probe a range of stack addresses from REG1 to REG3 inclusive.  These are
   addresses, not offsets.

   REG2 contains the backchain that must be stored into *sp at each allocation.

   This is subtly different than the Ada probing above in that it tries hard
   to prevent attacks that jump the stack guard.  Thus, it is never allowed
   to allocate more than PROBE_INTERVAL bytes of stack space without a
   suitable probe.  */

static const char *
output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[3];

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* This allocates and probes.  */
  xops[0] = reg1;
  xops[1] = reg2;
  xops[2] = GEN_INT (-probe_interval);
  if (TARGET_64BIT)
    output_asm_insn ("stdu %1,%2(%0)", xops);
  else
    output_asm_insn ("stwu %1,%2(%0)", xops);

  /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg3;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}
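
/* For instance, with a 4 KiB probe interval on a 64-bit target the loop
   emitted above is roughly (illustrative only; register numbers depend on
   the operands chosen by the caller):

	.LPSRL1:
		stdu 0,-4096(1)
		cmpd 0,1,12
		bne 0,.LPSRL1

   Each iteration both allocates a page and stores the backchain, so the
   allocation never runs more than one probe interval ahead of the last
   probe.  */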
/* Wrapper around the output_probe_stack_range routines.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
{
  if (flag_stack_clash_protection)
    return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
  else
    return output_probe_stack_range_1 (reg1, reg3);
}
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, i.e. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */
25999 rs6000_frame_related (rtx_insn
*insn
, rtx reg
, HOST_WIDE_INT val
,
26000 rtx reg2
, rtx repl2
)
26004 if (REGNO (reg
) == STACK_POINTER_REGNUM
)
26006 gcc_checking_assert (val
== 0);
26010 repl
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
26013 rtx pat
= PATTERN (insn
);
26014 if (!repl
&& !reg2
)
26016 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26017 if (GET_CODE (pat
) == PARALLEL
)
26018 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
26019 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
26021 rtx set
= XVECEXP (pat
, 0, i
);
26023 if (!REG_P (SET_SRC (set
))
26024 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
26025 RTX_FRAME_RELATED_P (set
) = 1;
26027 RTX_FRAME_RELATED_P (insn
) = 1;
26031 /* We expect that 'pat' is either a SET or a PARALLEL containing
26032 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26033 are important so they all have to be marked RTX_FRAME_RELATED_P.
26034 Call simplify_replace_rtx on the SETs rather than the whole insn
26035 so as to leave the other stuff alone (for example USE of r12). */
26037 set_used_flags (pat
);
26038 if (GET_CODE (pat
) == SET
)
26041 pat
= simplify_replace_rtx (pat
, reg
, repl
);
26043 pat
= simplify_replace_rtx (pat
, reg2
, repl2
);
26045 else if (GET_CODE (pat
) == PARALLEL
)
26047 pat
= shallow_copy_rtx (pat
);
26048 XVEC (pat
, 0) = shallow_copy_rtvec (XVEC (pat
, 0));
26050 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
26051 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
26053 rtx set
= XVECEXP (pat
, 0, i
);
26056 set
= simplify_replace_rtx (set
, reg
, repl
);
26058 set
= simplify_replace_rtx (set
, reg2
, repl2
);
26059 XVECEXP (pat
, 0, i
) = set
;
26061 if (!REG_P (SET_SRC (set
))
26062 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
26063 RTX_FRAME_RELATED_P (set
) = 1;
26067 gcc_unreachable ();
26069 RTX_FRAME_RELATED_P (insn
) = 1;
26070 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, copy_rtx_if_shared (pat
));
26075 /* Returns an insn that has a vrsave set operation with the
26076 appropriate CLOBBERs. */
26079 generate_set_vrsave (rtx reg
, rs6000_stack_t
*info
, int epiloguep
)
26082 rtx insn
, clobs
[TOTAL_ALTIVEC_REGS
+ 1];
26083 rtx vrsave
= gen_rtx_REG (SImode
, VRSAVE_REGNO
);
26086 = gen_rtx_SET (vrsave
,
26087 gen_rtx_UNSPEC_VOLATILE (SImode
,
26088 gen_rtvec (2, reg
, vrsave
),
26089 UNSPECV_SET_VRSAVE
));
26093 /* We need to clobber the registers in the mask so the scheduler
26094 does not move sets to VRSAVE before sets of AltiVec registers.
26096 However, if the function receives nonlocal gotos, reload will set
26097 all call saved registers live. We will end up with:
26099 (set (reg 999) (mem))
26100 (parallel [ (set (reg vrsave) (unspec blah))
26101 (clobber (reg 999))])
26103 The clobber will cause the store into reg 999 to be dead, and
26104 flow will attempt to delete an epilogue insn. In this case, we
26105 need an unspec use/set of the register. */
26107 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
26108 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
26110 if (!epiloguep
|| call_used_regs
[i
])
26111 clobs
[nclobs
++] = gen_rtx_CLOBBER (VOIDmode
,
26112 gen_rtx_REG (V4SImode
, i
));
26115 rtx reg
= gen_rtx_REG (V4SImode
, i
);
26118 = gen_rtx_SET (reg
,
26119 gen_rtx_UNSPEC (V4SImode
,
26120 gen_rtvec (1, reg
), 27));
26124 insn
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (nclobs
));
26126 for (i
= 0; i
< nclobs
; ++i
)
26127 XVECEXP (insn
, 0, i
) = clobs
[i
];

static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
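
/* For instance (illustrative), on a 64-bit target
   gen_frame_store (gen_rtx_REG (DImode, 31), stack_pointer_rtx, 8)
   builds the RTL
     (set (mem:DI (plus:DI (reg 1) (const_int 8))) (reg:DI 31))
   with the MEM carrying the frame alias set from gen_frame_mem.  */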
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
		       || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
26206 /* Return the name for an out-of-line register save/restore routine.
26207 We are saving/restoring GPRs if GPR is true. */
26210 rs6000_savres_routine_name (int regno
, int sel
)
26212 const char *prefix
= "";
26213 const char *suffix
= "";
26215 /* Different targets are supposed to define
26216 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26217 routine name could be defined with:
26219 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
     This is a nice idea in theory, but in reality, things are
     complicated in several ways:
26224 - ELF targets have save/restore routines for GPRs.
26226 - PPC64 ELF targets have routines for save/restore of GPRs that
26227 differ in what they do with the link register, so having a set
26228 prefix doesn't work. (We only use one of the save routines at
26229 the moment, though.)
26231 - PPC32 elf targets have "exit" versions of the restore routines
26232 that restore the link register and can save some extra space.
26233 These require an extra suffix. (There are also "tail" versions
26234 of the restore routines and "GOT" versions of the save routines,
26235 but we don't generate those at present. Same problems apply,
26238 We deal with all this by synthesizing our own prefix/suffix and
26239 using that for the simple sprintf call shown above. */
26240 if (DEFAULT_ABI
== ABI_V4
)
26245 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
26246 prefix
= (sel
& SAVRES_SAVE
) ? "_savegpr_" : "_restgpr_";
26247 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
26248 prefix
= (sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_";
26249 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26250 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
26254 if ((sel
& SAVRES_LR
))
26257 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
26259 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26260 /* No out-of-line save/restore routines for GPRs on AIX. */
26261 gcc_assert (!TARGET_AIX
|| (sel
& SAVRES_REG
) != SAVRES_GPR
);
26265 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
26266 prefix
= ((sel
& SAVRES_SAVE
)
26267 ? ((sel
& SAVRES_LR
) ? "_savegpr0_" : "_savegpr1_")
26268 : ((sel
& SAVRES_LR
) ? "_restgpr0_" : "_restgpr1_"));
26269 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
26271 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26272 if ((sel
& SAVRES_LR
))
26273 prefix
= ((sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_");
26277 prefix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_PREFIX
: RESTORE_FP_PREFIX
;
26278 suffix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_SUFFIX
: RESTORE_FP_SUFFIX
;
26281 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26282 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
26287 if (DEFAULT_ABI
== ABI_DARWIN
)
26289 /* The Darwin approach is (slightly) different, in order to be
26290 compatible with code generated by the system toolchain. There is a
26291 single symbol for the start of save sequence, and the code here
26292 embeds an offset into that code on the basis of the first register
26294 prefix
= (sel
& SAVRES_SAVE
) ? "save" : "rest" ;
26295 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
26296 sprintf (savres_routine_name
, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix
,
26297 ((sel
& SAVRES_LR
) ? "x" : ""), (regno
== 13 ? "" : "+"),
26298 (regno
- 13) * 4, prefix
, regno
);
26299 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
26300 sprintf (savres_routine_name
, "*%sFP%s%.0d ; %s f%d-f31", prefix
,
26301 (regno
== 14 ? "" : "+"), (regno
- 14) * 4, prefix
, regno
);
26302 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26303 sprintf (savres_routine_name
, "*%sVEC%s%.0d ; %s v%d-v31", prefix
,
26304 (regno
== 20 ? "" : "+"), (regno
- 20) * 8, prefix
, regno
);
26309 sprintf (savres_routine_name
, "%s%d%s", prefix
, regno
, suffix
);
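
  /* Examples of the names produced here (illustrative): for the 64-bit
     ELF GPR save routine starting at r28 with the LR variant this yields
     "_savegpr0_28", while the Darwin branch above instead emits an offset
     into a single save sequence, e.g. "*saveGPRx+60 ; save r28-r31".  */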
26311 return savres_routine_name
;
26314 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26315 We are saving/restoring GPRs if GPR is true. */
26318 rs6000_savres_routine_sym (rs6000_stack_t
*info
, int sel
)
26320 int regno
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26321 ? info
->first_gp_reg_save
26322 : (sel
& SAVRES_REG
) == SAVRES_FPR
26323 ? info
->first_fp_reg_save
- 32
26324 : (sel
& SAVRES_REG
) == SAVRES_VR
26325 ? info
->first_altivec_reg_save
- FIRST_ALTIVEC_REGNO
26330 /* Don't generate bogus routine names. */
26331 gcc_assert (FIRST_SAVRES_REGISTER
<= regno
26332 && regno
<= LAST_SAVRES_REGISTER
26333 && select
>= 0 && select
<= 12);
26335 sym
= savres_routine_syms
[regno
-FIRST_SAVRES_REGISTER
][select
];
26341 name
= rs6000_savres_routine_name (regno
, sel
);
26343 sym
= savres_routine_syms
[regno
-FIRST_SAVRES_REGISTER
][select
]
26344 = gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
26345 SYMBOL_REF_FLAGS (sym
) |= SYMBOL_FLAG_FUNCTION
;
26351 /* Emit a sequence of insns, including a stack tie if needed, for
26352 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26353 reset the stack pointer, but move the base of the frame into
26354 reg UPDT_REGNO for use by out-of-line register restore routines. */
26357 rs6000_emit_stack_reset (rtx frame_reg_rtx
, HOST_WIDE_INT frame_off
,
26358 unsigned updt_regno
)
26360 /* If there is nothing to do, don't do anything. */
26361 if (frame_off
== 0 && REGNO (frame_reg_rtx
) == updt_regno
)
26364 rtx updt_reg_rtx
= gen_rtx_REG (Pmode
, updt_regno
);
26366 /* This blockage is needed so that sched doesn't decide to move
26367 the sp change before the register restores. */
26368 if (DEFAULT_ABI
== ABI_V4
)
26369 return emit_insn (gen_stack_restore_tie (updt_reg_rtx
, frame_reg_rtx
,
26370 GEN_INT (frame_off
)));
26372 /* If we are restoring registers out-of-line, we will be using the
26373 "exit" variants of the restore routines, which will reset the
26374 stack for us. But we do need to point updt_reg into the
26375 right place for those routines. */
26376 if (frame_off
!= 0)
26377 return emit_insn (gen_add3_insn (updt_reg_rtx
,
26378 frame_reg_rtx
, GEN_INT (frame_off
)));
26380 return emit_move_insn (updt_reg_rtx
, frame_reg_rtx
);
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
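
/* That is: AIX and ELFv2 use r1 for FPR or LR-saving routines and r12
   otherwise; Darwin uses r1 for its FPR routines and r11 otherwise; the
   remaining ABIs use r11.  */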
26396 /* Construct a parallel rtx describing the effect of a call to an
26397 out-of-line register save/restore routine, and emit the insn
26398 or jump_insn as appropriate. */
26401 rs6000_emit_savres_rtx (rs6000_stack_t
*info
,
26402 rtx frame_reg_rtx
, int save_area_offset
, int lr_offset
,
26403 machine_mode reg_mode
, int sel
)
26406 int offset
, start_reg
, end_reg
, n_regs
, use_reg
;
26407 int reg_size
= GET_MODE_SIZE (reg_mode
);
26414 start_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26415 ? info
->first_gp_reg_save
26416 : (sel
& SAVRES_REG
) == SAVRES_FPR
26417 ? info
->first_fp_reg_save
26418 : (sel
& SAVRES_REG
) == SAVRES_VR
26419 ? info
->first_altivec_reg_save
26421 end_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26423 : (sel
& SAVRES_REG
) == SAVRES_FPR
26425 : (sel
& SAVRES_REG
) == SAVRES_VR
26426 ? LAST_ALTIVEC_REGNO
+ 1
26428 n_regs
= end_reg
- start_reg
;
26429 p
= rtvec_alloc (3 + ((sel
& SAVRES_LR
) ? 1 : 0)
26430 + ((sel
& SAVRES_REG
) == SAVRES_VR
? 1 : 0)
26433 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26434 RTVEC_ELT (p
, offset
++) = ret_rtx
;
26436 RTVEC_ELT (p
, offset
++)
26437 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
26439 sym
= rs6000_savres_routine_sym (info
, sel
);
26440 RTVEC_ELT (p
, offset
++) = gen_rtx_USE (VOIDmode
, sym
);
26442 use_reg
= ptr_regno_for_savres (sel
);
26443 if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26445 /* Vector regs are saved/restored using [reg+reg] addressing. */
26446 RTVEC_ELT (p
, offset
++)
26447 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
26448 RTVEC_ELT (p
, offset
++)
26449 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, 0));
26452 RTVEC_ELT (p
, offset
++)
26453 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
26455 for (i
= 0; i
< end_reg
- start_reg
; i
++)
26456 RTVEC_ELT (p
, i
+ offset
)
26457 = gen_frame_set (gen_rtx_REG (reg_mode
, start_reg
+ i
),
26458 frame_reg_rtx
, save_area_offset
+ reg_size
* i
,
26459 (sel
& SAVRES_SAVE
) != 0);
26461 if ((sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26462 RTVEC_ELT (p
, i
+ offset
)
26463 = gen_frame_store (gen_rtx_REG (Pmode
, 0), frame_reg_rtx
, lr_offset
);
26465 par
= gen_rtx_PARALLEL (VOIDmode
, p
);
26467 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26469 insn
= emit_jump_insn (par
);
26470 JUMP_LABEL (insn
) = ret_rtx
;
26473 insn
= emit_insn (par
);
26477 /* Emit prologue code to store CR fields that need to be saved into REG. This
26478 function should only be called when moving the non-volatile CRs to REG, it
26479 is not a general purpose routine to move the entire set of CRs to REG.
26480 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26484 rs6000_emit_prologue_move_from_cr (rtx reg
)
26486 /* Only the ELFv2 ABI allows storing only selected fields. */
26487 if (DEFAULT_ABI
== ABI_ELFv2
&& TARGET_MFCRF
)
26489 int i
, cr_reg
[8], count
= 0;
26491 /* Collect CR fields that must be saved. */
26492 for (i
= 0; i
< 8; i
++)
26493 if (save_reg_p (CR0_REGNO
+ i
))
26494 cr_reg
[count
++] = i
;
26496 /* If it's just a single one, use mfcrf. */
26499 rtvec p
= rtvec_alloc (1);
26500 rtvec r
= rtvec_alloc (2);
26501 RTVEC_ELT (r
, 0) = gen_rtx_REG (CCmode
, CR0_REGNO
+ cr_reg
[0]);
26502 RTVEC_ELT (r
, 1) = GEN_INT (1 << (7 - cr_reg
[0]));
26504 = gen_rtx_SET (reg
,
26505 gen_rtx_UNSPEC (SImode
, r
, UNSPEC_MOVESI_FROM_CR
));
26507 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26511 /* ??? It might be better to handle count == 2 / 3 cases here
26512 as well, using logical operations to combine the values. */
26515 emit_insn (gen_prologue_movesi_from_cr (reg
));
26518 /* Return whether the split-stack arg pointer (r12) is used. */
26521 split_stack_arg_pointer_used_p (void)
26523 /* If the pseudo holding the arg pointer is no longer a pseudo,
26524 then the arg pointer is used. */
26525 if (cfun
->machine
->split_stack_arg_pointer
!= NULL_RTX
26526 && (!REG_P (cfun
->machine
->split_stack_arg_pointer
)
26527 || (REGNO (cfun
->machine
->split_stack_arg_pointer
)
26528 < FIRST_PSEUDO_REGISTER
)))
26531 /* Unfortunately we also need to do some code scanning, since
26532 r12 may have been substituted for the pseudo. */
26534 basic_block bb
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
;
26535 FOR_BB_INSNS (bb
, insn
)
26536 if (NONDEBUG_INSN_P (insn
))
26538 /* A call destroys r12. */
26543 FOR_EACH_INSN_USE (use
, insn
)
26545 rtx x
= DF_REF_REG (use
);
26546 if (REG_P (x
) && REGNO (x
) == 12)
26550 FOR_EACH_INSN_DEF (def
, insn
)
26552 rtx x
= DF_REF_REG (def
);
26553 if (REG_P (x
) && REGNO (x
) == 12)
26557 return bitmap_bit_p (DF_LR_OUT (bb
), 12);
26560 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26563 rs6000_global_entry_point_needed_p (void)
26565 /* Only needed for the ELFv2 ABI. */
26566 if (DEFAULT_ABI
!= ABI_ELFv2
)
26569 /* With -msingle-pic-base, we assume the whole program shares the same
26570 TOC, so no global entry point prologues are needed anywhere. */
26571 if (TARGET_SINGLE_PIC_BASE
)
26574 /* Ensure we have a global entry point for thunks. ??? We could
26575 avoid that if the target routine doesn't need a global entry point,
26576 but we do not know whether this is the case at this point. */
26577 if (cfun
->is_thunk
)
26580 /* For regular functions, rs6000_emit_prologue sets this flag if the
26581 routine ever uses the TOC pointer. */
26582 return cfun
->machine
->r2_setup_needed
;
26585 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26587 rs6000_get_separate_components (void)
26589 rs6000_stack_t
*info
= rs6000_stack_info ();
26591 if (WORLD_SAVE_P (info
))
26594 gcc_assert (!(info
->savres_strategy
& SAVE_MULTIPLE
)
26595 && !(info
->savres_strategy
& REST_MULTIPLE
));
26597 /* Component 0 is the save/restore of LR (done via GPR0).
26598 Components 13..31 are the save/restore of GPR13..GPR31.
26599 Components 46..63 are the save/restore of FPR14..FPR31. */
26601 cfun
->machine
->n_components
= 64;
26603 sbitmap components
= sbitmap_alloc (cfun
->machine
->n_components
);
26604 bitmap_clear (components
);
26606 int reg_size
= TARGET_32BIT
? 4 : 8;
26607 int fp_reg_size
= 8;
26609 /* The GPRs we need saved to the frame. */
26610 if ((info
->savres_strategy
& SAVE_INLINE_GPRS
)
26611 && (info
->savres_strategy
& REST_INLINE_GPRS
))
26613 int offset
= info
->gp_save_offset
;
26615 offset
+= info
->total_size
;
26617 for (unsigned regno
= info
->first_gp_reg_save
; regno
< 32; regno
++)
26619 if (IN_RANGE (offset
, -0x8000, 0x7fff)
26620 && save_reg_p (regno
))
26621 bitmap_set_bit (components
, regno
);
26623 offset
+= reg_size
;
26627 /* Don't mess with the hard frame pointer. */
26628 if (frame_pointer_needed
)
26629 bitmap_clear_bit (components
, HARD_FRAME_POINTER_REGNUM
);
26631 /* Don't mess with the fixed TOC register. */
26632 if ((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
26633 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
26634 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
26635 bitmap_clear_bit (components
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
26637 /* The FPRs we need saved to the frame. */
26638 if ((info
->savres_strategy
& SAVE_INLINE_FPRS
)
26639 && (info
->savres_strategy
& REST_INLINE_FPRS
))
26641 int offset
= info
->fp_save_offset
;
26643 offset
+= info
->total_size
;
26645 for (unsigned regno
= info
->first_fp_reg_save
; regno
< 64; regno
++)
26647 if (IN_RANGE (offset
, -0x8000, 0x7fff) && save_reg_p (regno
))
26648 bitmap_set_bit (components
, regno
);
26650 offset
+= fp_reg_size
;
26654 /* Optimize LR save and restore if we can. This is component 0. Any
26655 out-of-line register save/restore routines need LR. */
26656 if (info
->lr_save_p
26657 && !(flag_pic
&& (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
26658 && (info
->savres_strategy
& SAVE_INLINE_GPRS
)
26659 && (info
->savres_strategy
& REST_INLINE_GPRS
)
26660 && (info
->savres_strategy
& SAVE_INLINE_FPRS
)
26661 && (info
->savres_strategy
& REST_INLINE_FPRS
)
26662 && (info
->savres_strategy
& SAVE_INLINE_VRS
)
26663 && (info
->savres_strategy
& REST_INLINE_VRS
))
26665 int offset
= info
->lr_save_offset
;
26667 offset
+= info
->total_size
;
26668 if (IN_RANGE (offset
, -0x8000, 0x7fff))
26669 bitmap_set_bit (components
, 0);
26675 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26677 rs6000_components_for_bb (basic_block bb
)
26679 rs6000_stack_t
*info
= rs6000_stack_info ();
26681 bitmap in
= DF_LIVE_IN (bb
);
26682 bitmap gen
= &DF_LIVE_BB_INFO (bb
)->gen
;
26683 bitmap kill
= &DF_LIVE_BB_INFO (bb
)->kill
;
26685 sbitmap components
= sbitmap_alloc (cfun
->machine
->n_components
);
26686 bitmap_clear (components
);
26688 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26691 for (unsigned regno
= info
->first_gp_reg_save
; regno
< 32; regno
++)
26692 if (bitmap_bit_p (in
, regno
)
26693 || bitmap_bit_p (gen
, regno
)
26694 || bitmap_bit_p (kill
, regno
))
26695 bitmap_set_bit (components
, regno
);
26698 for (unsigned regno
= info
->first_fp_reg_save
; regno
< 64; regno
++)
26699 if (bitmap_bit_p (in
, regno
)
26700 || bitmap_bit_p (gen
, regno
)
26701 || bitmap_bit_p (kill
, regno
))
26702 bitmap_set_bit (components
, regno
);
26704 /* The link register. */
26705 if (bitmap_bit_p (in
, LR_REGNO
)
26706 || bitmap_bit_p (gen
, LR_REGNO
)
26707 || bitmap_bit_p (kill
, LR_REGNO
))
26708 bitmap_set_bit (components
, 0);
26713 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26715 rs6000_disqualify_components (sbitmap components
, edge e
,
26716 sbitmap edge_components
, bool /*is_prologue*/)
26718 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26719 live where we want to place that code. */
26720 if (bitmap_bit_p (edge_components
, 0)
26721 && bitmap_bit_p (DF_LIVE_IN (e
->dest
), 0))
26724 fprintf (dump_file
, "Disqualifying LR because GPR0 is live "
26725 "on entry to bb %d\n", e
->dest
->index
);
26726 bitmap_clear_bit (components
, 0);
26730 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26732 rs6000_emit_prologue_components (sbitmap components
)
26734 rs6000_stack_t
*info
= rs6000_stack_info ();
26735 rtx ptr_reg
= gen_rtx_REG (Pmode
, frame_pointer_needed
26736 ? HARD_FRAME_POINTER_REGNUM
26737 : STACK_POINTER_REGNUM
);
26739 machine_mode reg_mode
= Pmode
;
26740 int reg_size
= TARGET_32BIT
? 4 : 8;
26741 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26743 int fp_reg_size
= 8;
26745 /* Prologue for LR. */
26746 if (bitmap_bit_p (components
, 0))
26748 rtx reg
= gen_rtx_REG (reg_mode
, 0);
26749 rtx_insn
*insn
= emit_move_insn (reg
, gen_rtx_REG (reg_mode
, LR_REGNO
));
26750 RTX_FRAME_RELATED_P (insn
) = 1;
26751 add_reg_note (insn
, REG_CFA_REGISTER
, NULL
);
26753 int offset
= info
->lr_save_offset
;
26755 offset
+= info
->total_size
;
26757 insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26758 RTX_FRAME_RELATED_P (insn
) = 1;
26759 rtx lr
= gen_rtx_REG (reg_mode
, LR_REGNO
);
26760 rtx mem
= copy_rtx (SET_DEST (single_set (insn
)));
26761 add_reg_note (insn
, REG_CFA_OFFSET
, gen_rtx_SET (mem
, lr
));
26764 /* Prologue for the GPRs. */
26765 int offset
= info
->gp_save_offset
;
26767 offset
+= info
->total_size
;
26769 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26771 if (bitmap_bit_p (components
, i
))
26773 rtx reg
= gen_rtx_REG (reg_mode
, i
);
26774 rtx_insn
*insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26775 RTX_FRAME_RELATED_P (insn
) = 1;
26776 rtx set
= copy_rtx (single_set (insn
));
26777 add_reg_note (insn
, REG_CFA_OFFSET
, set
);
26780 offset
+= reg_size
;
26783 /* Prologue for the FPRs. */
26784 offset
= info
->fp_save_offset
;
26786 offset
+= info
->total_size
;
26788 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26790 if (bitmap_bit_p (components
, i
))
26792 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
26793 rtx_insn
*insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26794 RTX_FRAME_RELATED_P (insn
) = 1;
26795 rtx set
= copy_rtx (single_set (insn
));
26796 add_reg_note (insn
, REG_CFA_OFFSET
, set
);
26799 offset
+= fp_reg_size
;
26803 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26805 rs6000_emit_epilogue_components (sbitmap components
)
26807 rs6000_stack_t
*info
= rs6000_stack_info ();
26808 rtx ptr_reg
= gen_rtx_REG (Pmode
, frame_pointer_needed
26809 ? HARD_FRAME_POINTER_REGNUM
26810 : STACK_POINTER_REGNUM
);
26812 machine_mode reg_mode
= Pmode
;
26813 int reg_size
= TARGET_32BIT
? 4 : 8;
26815 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26817 int fp_reg_size
= 8;
26819 /* Epilogue for the FPRs. */
26820 int offset
= info
->fp_save_offset
;
26822 offset
+= info
->total_size
;
26824 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26826 if (bitmap_bit_p (components
, i
))
26828 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
26829 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26830 RTX_FRAME_RELATED_P (insn
) = 1;
26831 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
26834 offset
+= fp_reg_size
;
26837 /* Epilogue for the GPRs. */
26838 offset
= info
->gp_save_offset
;
26840 offset
+= info
->total_size
;
26842 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26844 if (bitmap_bit_p (components
, i
))
26846 rtx reg
= gen_rtx_REG (reg_mode
, i
);
26847 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26848 RTX_FRAME_RELATED_P (insn
) = 1;
26849 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
26852 offset
+= reg_size
;
26855 /* Epilogue for LR. */
26856 if (bitmap_bit_p (components
, 0))
26858 int offset
= info
->lr_save_offset
;
26860 offset
+= info
->total_size
;
26862 rtx reg
= gen_rtx_REG (reg_mode
, 0);
26863 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26865 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
26866 insn
= emit_move_insn (lr
, reg
);
26867 RTX_FRAME_RELATED_P (insn
) = 1;
26868 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
/* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS.  */
static void
rs6000_set_handled_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  for (int i = info->first_gp_reg_save; i < 32; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->gpr_is_wrapped_separately[i] = true;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->fpr_is_wrapped_separately[i - 32] = true;

  if (bitmap_bit_p (components, 0))
    cfun->machine->lr_is_wrapped_separately = true;
}
26890 /* VRSAVE is a bit vector representing which AltiVec registers
26891 are used. The OS uses this to determine which vector
26892 registers to save on a context switch. We need to save
26893 VRSAVE on the stack frame, add whatever AltiVec registers we
26894 used in this function, and do the corresponding magic in the
26897 emit_vrsave_prologue (rs6000_stack_t
*info
, int save_regno
,
26898 HOST_WIDE_INT frame_off
, rtx frame_reg_rtx
)
26900 /* Get VRSAVE into a GPR. */
26901 rtx reg
= gen_rtx_REG (SImode
, save_regno
);
26902 rtx vrsave
= gen_rtx_REG (SImode
, VRSAVE_REGNO
);
26904 emit_insn (gen_get_vrsave_internal (reg
));
26906 emit_insn (gen_rtx_SET (reg
, vrsave
));
26909 int offset
= info
->vrsave_save_offset
+ frame_off
;
26910 emit_insn (gen_frame_store (reg
, frame_reg_rtx
, offset
));
26912 /* Include the registers in the mask. */
26913 emit_insn (gen_iorsi3 (reg
, reg
, GEN_INT (info
->vrsave_mask
)));
26915 emit_insn (generate_set_vrsave (reg
, info
, 0));
26918 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26919 called, it left the arg pointer to the old stack in r29. Otherwise, the
26920 arg pointer is the top of the current frame. */
26922 emit_split_stack_prologue (rs6000_stack_t
*info
, rtx_insn
*sp_adjust
,
26923 HOST_WIDE_INT frame_off
, rtx frame_reg_rtx
)
26925 cfun
->machine
->split_stack_argp_used
= true;
26929 rtx r12
= gen_rtx_REG (Pmode
, 12);
26930 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26931 rtx set_r12
= gen_rtx_SET (r12
, sp_reg_rtx
);
26932 emit_insn_before (set_r12
, sp_adjust
);
26934 else if (frame_off
!= 0 || REGNO (frame_reg_rtx
) != 12)
26936 rtx r12
= gen_rtx_REG (Pmode
, 12);
26937 if (frame_off
== 0)
26938 emit_move_insn (r12
, frame_reg_rtx
);
26940 emit_insn (gen_add3_insn (r12
, frame_reg_rtx
, GEN_INT (frame_off
)));
26945 rtx r12
= gen_rtx_REG (Pmode
, 12);
26946 rtx r29
= gen_rtx_REG (Pmode
, 29);
26947 rtx cr7
= gen_rtx_REG (CCUNSmode
, CR7_REGNO
);
26948 rtx not_more
= gen_label_rtx ();
26951 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
26952 gen_rtx_GEU (VOIDmode
, cr7
, const0_rtx
),
26953 gen_rtx_LABEL_REF (VOIDmode
, not_more
),
26955 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
26956 JUMP_LABEL (jump
) = not_more
;
26957 LABEL_NUSES (not_more
) += 1;
26958 emit_move_insn (r12
, r29
);
26959 emit_label (not_more
);
26963 /* Emit function prologue as insns. */
26966 rs6000_emit_prologue (void)
26968 rs6000_stack_t
*info
= rs6000_stack_info ();
26969 machine_mode reg_mode
= Pmode
;
26970 int reg_size
= TARGET_32BIT
? 4 : 8;
26971 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26973 int fp_reg_size
= 8;
26974 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26975 rtx frame_reg_rtx
= sp_reg_rtx
;
26976 unsigned int cr_save_regno
;
26977 rtx cr_save_rtx
= NULL_RTX
;
26980 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
26981 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
26982 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
26983 int using_split_stack
= (flag_split_stack
26984 && (lookup_attribute ("no_split_stack",
26985 DECL_ATTRIBUTES (cfun
->decl
))
26988 /* Offset to top of frame for frame_reg and sp respectively. */
26989 HOST_WIDE_INT frame_off
= 0;
26990 HOST_WIDE_INT sp_off
= 0;
26991 /* sp_adjust is the stack adjusting instruction, tracked so that the
26992 insn setting up the split-stack arg pointer can be emitted just
26993 prior to it, when r12 is not used here for other purposes. */
26994 rtx_insn
*sp_adjust
= 0;
26997 /* Track and check usage of r0, r11, r12. */
26998 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
26999 #define START_USE(R) do \
27001 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27002 reg_inuse |= 1 << (R); \
27004 #define END_USE(R) do \
27006 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27007 reg_inuse &= ~(1 << (R)); \
27009 #define NOT_INUSE(R) do \
27011 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27014 #define START_USE(R) do {} while (0)
27015 #define END_USE(R) do {} while (0)
27016 #define NOT_INUSE(R) do {} while (0)
27019 if (DEFAULT_ABI
== ABI_ELFv2
27020 && !TARGET_SINGLE_PIC_BASE
)
27022 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
27024 /* With -mminimal-toc we may generate an extra use of r2 below. */
27025 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
27026 && !constant_pool_empty_p ())
27027 cfun
->machine
->r2_setup_needed
= true;
27031 if (flag_stack_usage_info
)
27032 current_function_static_stack_size
= info
->total_size
;
27034 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
27036 HOST_WIDE_INT size
= info
->total_size
;
27038 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
27040 if (size
> PROBE_INTERVAL
&& size
> get_stack_check_protect ())
27041 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27042 size
- get_stack_check_protect ());
27045 rs6000_emit_probe_stack_range (get_stack_check_protect (), size
);
27048 if (TARGET_FIX_AND_CONTINUE
)
27050 /* gdb on darwin arranges to forward a function from the old
27051 address by modifying the first 5 instructions of the function
27052 to branch to the overriding function. This is necessary to
27053 permit function pointers that point to the old function to
27054 actually forward to the new function. */
27055 emit_insn (gen_nop ());
27056 emit_insn (gen_nop ());
27057 emit_insn (gen_nop ());
27058 emit_insn (gen_nop ());
27059 emit_insn (gen_nop ());
27062 /* Handle world saves specially here. */
27063 if (WORLD_SAVE_P (info
))
27070 /* save_world expects lr in r0. */
27071 reg0
= gen_rtx_REG (Pmode
, 0);
27072 if (info
->lr_save_p
)
27074 insn
= emit_move_insn (reg0
,
27075 gen_rtx_REG (Pmode
, LR_REGNO
));
27076 RTX_FRAME_RELATED_P (insn
) = 1;
27079 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27080 assumptions about the offsets of various bits of the stack
27082 gcc_assert (info
->gp_save_offset
== -220
27083 && info
->fp_save_offset
== -144
27084 && info
->lr_save_offset
== 8
27085 && info
->cr_save_offset
== 4
27088 && (!crtl
->calls_eh_return
27089 || info
->ehrd_offset
== -432)
27090 && info
->vrsave_save_offset
== -224
27091 && info
->altivec_save_offset
== -416);
27093 treg
= gen_rtx_REG (SImode
, 11);
27094 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
27096 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27097 in R11. It also clobbers R12, so beware! */
27099 /* Preserve CR2 for save_world prologues */
27101 sz
+= 32 - info
->first_gp_reg_save
;
27102 sz
+= 64 - info
->first_fp_reg_save
;
27103 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
27104 p
= rtvec_alloc (sz
);
27106 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
27107 gen_rtx_REG (SImode
,
27109 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
27110 gen_rtx_SYMBOL_REF (Pmode
,
27112 /* We do floats first so that the instruction pattern matches
27114 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
27116 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
27118 info
->first_fp_reg_save
+ i
),
27120 info
->fp_save_offset
+ frame_off
+ 8 * i
);
27121 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
27123 = gen_frame_store (gen_rtx_REG (V4SImode
,
27124 info
->first_altivec_reg_save
+ i
),
27126 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
27127 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27129 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27131 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27133 /* CR register traditionally saved as CR2. */
27135 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
27136 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
27137 /* Explain about use of R0. */
27138 if (info
->lr_save_p
)
27140 = gen_frame_store (reg0
,
27141 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
27142 /* Explain what happens to the stack pointer. */
27144 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
27145 RTVEC_ELT (p
, j
++) = gen_rtx_SET (sp_reg_rtx
, newval
);
27148 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27149 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27150 treg
, GEN_INT (-info
->total_size
));
27151 sp_off
= frame_off
= info
->total_size
;
27154 strategy
= info
->savres_strategy
;
27156 /* For V.4, update stack before we do any saving and set back pointer. */
27157 if (! WORLD_SAVE_P (info
)
27159 && (DEFAULT_ABI
== ABI_V4
27160 || crtl
->calls_eh_return
))
27162 bool need_r11
= (!(strategy
& SAVE_INLINE_FPRS
)
27163 || !(strategy
& SAVE_INLINE_GPRS
)
27164 || !(strategy
& SAVE_INLINE_VRS
));
27165 int ptr_regno
= -1;
27166 rtx ptr_reg
= NULL_RTX
;
27169 if (info
->total_size
< 32767)
27170 frame_off
= info
->total_size
;
27173 else if (info
->cr_save_p
27175 || info
->first_fp_reg_save
< 64
27176 || info
->first_gp_reg_save
< 32
27177 || info
->altivec_size
!= 0
27178 || info
->vrsave_size
!= 0
27179 || crtl
->calls_eh_return
)
27183 /* The prologue won't be saving any regs so there is no need
27184 to set up a frame register to access any frame save area.
27185 We also won't be using frame_off anywhere below, but set
27186 the correct value anyway to protect against future
27187 changes to this function. */
27188 frame_off
= info
->total_size
;
27190 if (ptr_regno
!= -1)
27192 /* Set up the frame offset to that needed by the first
27193 out-of-line save function. */
27194 START_USE (ptr_regno
);
27195 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27196 frame_reg_rtx
= ptr_reg
;
27197 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
27198 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
27199 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
27200 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
27201 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
27202 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27203 frame_off
= -ptr_off
;
27205 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27207 if (REGNO (frame_reg_rtx
) == 12)
27209 sp_off
= info
->total_size
;
27210 if (frame_reg_rtx
!= sp_reg_rtx
)
27211 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27214 /* If we use the link register, get it into r0. */
27215 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
27216 && !cfun
->machine
->lr_is_wrapped_separately
)
27218 rtx addr
, reg
, mem
;
27220 reg
= gen_rtx_REG (Pmode
, 0);
27222 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27223 RTX_FRAME_RELATED_P (insn
) = 1;
27225 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
27226 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
27228 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27229 GEN_INT (info
->lr_save_offset
+ frame_off
));
27230 mem
= gen_rtx_MEM (Pmode
, addr
);
27231 /* This should not be of rs6000_sr_alias_set, because of
27232 __builtin_return_address. */
27234 insn
= emit_move_insn (mem
, reg
);
27235 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27236 NULL_RTX
, NULL_RTX
);
27241 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27242 r12 will be needed by out-of-line gpr restore. */
27243 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27244 && !(strategy
& (SAVE_INLINE_GPRS
27245 | SAVE_NOINLINE_GPRS_SAVES_LR
))
27247 if (!WORLD_SAVE_P (info
)
27249 && REGNO (frame_reg_rtx
) != cr_save_regno
27250 && !(using_static_chain_p
&& cr_save_regno
== 11)
27251 && !(using_split_stack
&& cr_save_regno
== 12 && sp_adjust
))
27253 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
27254 START_USE (cr_save_regno
);
27255 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27258 /* Do any required saving of fpr's. If only one or two to save, do
27259 it ourselves. Otherwise, call function. */
27260 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
27262 int offset
= info
->fp_save_offset
+ frame_off
;
27263 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
27266 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
27267 emit_frame_save (frame_reg_rtx
, fp_reg_mode
, i
, offset
,
27268 sp_off
- frame_off
);
27270 offset
+= fp_reg_size
;
27273 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
27275 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27276 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27277 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27278 rtx ptr_reg
= frame_reg_rtx
;
27280 if (REGNO (frame_reg_rtx
) == ptr_regno
)
27281 gcc_checking_assert (frame_off
== 0);
27284 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27285 NOT_INUSE (ptr_regno
);
27286 emit_insn (gen_add3_insn (ptr_reg
,
27287 frame_reg_rtx
, GEN_INT (frame_off
)));
27289 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27290 info
->fp_save_offset
,
27291 info
->lr_save_offset
,
27293 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
27294 NULL_RTX
, NULL_RTX
);
27299 /* Save GPRs. This is done as a PARALLEL if we are using
27300 the store-multiple instructions. */
27301 if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
27303 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
27304 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
27305 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27306 rtx ptr_reg
= frame_reg_rtx
;
27307 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
27308 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
27311 if (ptr_regno
== 12)
27314 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27316 /* Need to adjust r11 (r12) if we saved any FPRs. */
27317 if (end_save
+ frame_off
!= 0)
27319 rtx offset
= GEN_INT (end_save
+ frame_off
);
27322 frame_off
= -end_save
;
27324 NOT_INUSE (ptr_regno
);
27325 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27327 else if (!ptr_set_up
)
27329 NOT_INUSE (ptr_regno
);
27330 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27332 ptr_off
= -end_save
;
27333 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27334 info
->gp_save_offset
+ ptr_off
,
27335 info
->lr_save_offset
+ ptr_off
,
27337 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
27338 NULL_RTX
, NULL_RTX
);
27342 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_MULTIPLE
))
27346 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
27347 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27349 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27351 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27352 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27353 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27354 NULL_RTX
, NULL_RTX
);
27356 else if (!WORLD_SAVE_P (info
))
27358 int offset
= info
->gp_save_offset
+ frame_off
;
27359 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
27362 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
27363 emit_frame_save (frame_reg_rtx
, reg_mode
, i
, offset
,
27364 sp_off
- frame_off
);
27366 offset
+= reg_size
;
27370 if (crtl
->calls_eh_return
)
27377 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27378 if (regno
== INVALID_REGNUM
)
27382 p
= rtvec_alloc (i
);
27386 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27387 if (regno
== INVALID_REGNUM
)
27391 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
27393 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
27394 RTVEC_ELT (p
, i
) = set
;
27395 RTX_FRAME_RELATED_P (set
) = 1;
27398 insn
= emit_insn (gen_blockage ());
27399 RTX_FRAME_RELATED_P (insn
) = 1;
27400 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
27403 /* In AIX ABI we need to make sure r2 is really saved. */
27404 if (TARGET_AIX
&& crtl
->calls_eh_return
)
27406 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
27407 rtx join_insn
, note
;
27408 rtx_insn
*save_insn
;
27409 long toc_restore_insn
;
27411 tmp_reg
= gen_rtx_REG (Pmode
, 11);
27412 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
27413 if (using_static_chain_p
)
27416 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
27420 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27421 /* Peek at instruction to which this function returns. If it's
27422 restoring r2, then we know we've already saved r2. We can't
27423 unconditionally save r2 because the value we have will already
27424 be updated if we arrived at this function via a plt call or
27425 toc adjusting stub. */
27426 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
27427 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
27428 + RS6000_TOC_SAVE_SLOT
);
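      /* 0x80410000 and 0xE8410000 are the encodings of "lwz r2,0(r1)" and
	 "ld r2,0(r1)" respectively, so adding RS6000_TOC_SAVE_SLOT forms
	 the exact TOC-restore instruction a call-site stub would place
	 after the bl being returned to.  */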
27429 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
27430 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
27431 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
27432 validate_condition_mode (EQ
, CCUNSmode
);
27433 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
27434 emit_insn (gen_rtx_SET (compare_result
,
27435 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
27436 toc_save_done
= gen_label_rtx ();
27437 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
27438 gen_rtx_EQ (VOIDmode
, compare_result
,
27440 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
27442 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
27443 JUMP_LABEL (jump
) = toc_save_done
;
27444 LABEL_NUSES (toc_save_done
) += 1;
27446 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
27447 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
27448 sp_off
- frame_off
);
27450 emit_label (toc_save_done
);
27452 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27453 have a CFG that has different saves along different paths.
27454 Move the note to a dummy blockage insn, which describes that
27455 R2 is unconditionally saved after the label. */
27456 /* ??? An alternate representation might be a special insn pattern
27457 containing both the branch and the store. That might let the
27458 code that minimizes the number of DW_CFA_advance opcodes better
27459 freedom in placing the annotations. */
27460 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
27462 remove_note (save_insn
, note
);
27464 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
27465 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
27466 RTX_FRAME_RELATED_P (save_insn
) = 0;
27468 join_insn
= emit_insn (gen_blockage ());
27469 REG_NOTES (join_insn
) = note
;
27470 RTX_FRAME_RELATED_P (join_insn
) = 1;
27472 if (using_static_chain_p
)
27474 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
27481 /* Save CR if we use any that must be preserved. */
27482 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
27484 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27485 GEN_INT (info
->cr_save_offset
+ frame_off
));
27486 rtx mem
= gen_frame_mem (SImode
, addr
);
27488 /* If we didn't copy cr before, do so now using r0. */
27489 if (cr_save_rtx
== NULL_RTX
)
27492 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
27493 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27496 /* Saving CR requires a two-instruction sequence: one instruction
27497 to move the CR to a general-purpose register, and a second
27498 instruction that stores the GPR to memory.
27500 We do not emit any DWARF CFI records for the first of these,
27501 because we cannot properly represent the fact that CR is saved in
27502 a register. One reason is that we cannot express that multiple
27503 CR fields are saved; another reason is that on 64-bit, the size
27504 of the CR register in DWARF (4 bytes) differs from the size of
27505 a general-purpose register.
27507 This means if any intervening instruction were to clobber one of
27508 the call-saved CR fields, we'd have incorrect CFI. To prevent
27509 this from happening, we mark the store to memory as a use of
27510 those CR fields, which prevents any such instruction from being
27511 scheduled in between the two instructions. */
27516 crsave_v
[n_crsave
++] = gen_rtx_SET (mem
, cr_save_rtx
);
27517 for (i
= 0; i
< 8; i
++)
27518 if (save_reg_p (CR0_REGNO
+ i
))
27519 crsave_v
[n_crsave
++]
27520 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27522 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
,
27523 gen_rtvec_v (n_crsave
, crsave_v
)));
27524 END_USE (REGNO (cr_save_rtx
));
27526 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27527 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27528 so we need to construct a frame expression manually. */
27529 RTX_FRAME_RELATED_P (insn
) = 1;
27531 /* Update address to be stack-pointer relative, like
27532 rs6000_frame_related would do. */
27533 addr
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
27534 GEN_INT (info
->cr_save_offset
+ sp_off
));
27535 mem
= gen_frame_mem (SImode
, addr
);
27537 if (DEFAULT_ABI
== ABI_ELFv2
)
27539 /* In the ELFv2 ABI we generate separate CFI records for each
27540 CR field that was actually saved. They all point to the
27541 same 32-bit stack slot. */
27545 for (i
= 0; i
< 8; i
++)
27546 if (save_reg_p (CR0_REGNO
+ i
))
27549 = gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
27551 RTX_FRAME_RELATED_P (crframe
[n_crframe
]) = 1;
27555 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27556 gen_rtx_PARALLEL (VOIDmode
,
27557 gen_rtvec_v (n_crframe
, crframe
)));
27561 /* In other ABIs, by convention, we use a single CR regnum to
27562 represent the fact that all call-saved CR fields are saved.
27563 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27564 rtx set
= gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR2_REGNO
));
27565 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
27569 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27570 *separate* slots if the routine calls __builtin_eh_return, so
27571 that they can be independently restored by the unwinder. */
27572 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
27574 int i
, cr_off
= info
->ehcr_offset
;
27577 /* ??? We might get better performance by using multiple mfocrf
27579 crsave
= gen_rtx_REG (SImode
, 0);
27580 emit_insn (gen_prologue_movesi_from_cr (crsave
));
27582 for (i
= 0; i
< 8; i
++)
27583 if (!call_used_regs
[CR0_REGNO
+ i
])
27585 rtvec p
= rtvec_alloc (2);
27587 = gen_frame_store (crsave
, frame_reg_rtx
, cr_off
+ frame_off
);
27589 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27591 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27593 RTX_FRAME_RELATED_P (insn
) = 1;
27594 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27595 gen_frame_store (gen_rtx_REG (SImode
, CR0_REGNO
+ i
),
27596 sp_reg_rtx
, cr_off
+ sp_off
));
27598 cr_off
+= reg_size
;
27602 /* If we are emitting stack probes, but allocate no stack, then
27603 just note that in the dump file. */
27604 if (flag_stack_clash_protection
27607 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME
, false);
27609 /* Update stack and set back pointer unless this is V.4,
27610 for which it was done previously. */
27611 if (!WORLD_SAVE_P (info
) && info
->push_p
27612 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
27614 rtx ptr_reg
= NULL
;
      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
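      /* A D-form load/store reaches at most a signed 16-bit displacement,
	 so (illustrative example, numbers hypothetical) a vector save slot
	 at frame offset 40000 cannot be addressed off the stack pointer
	 directly; the code below switches to a dedicated pointer register
	 whenever a slot would land beyond +32767.  */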
27619 if ((strategy
& SAVE_INLINE_VRS
) == 0
27620 || (info
->altivec_size
!= 0
27621 && (info
->altivec_save_offset
+ info
->altivec_size
- 16
27622 + info
->total_size
- frame_off
) > 32767)
27623 || (info
->vrsave_size
!= 0
27624 && (info
->vrsave_save_offset
27625 + info
->total_size
- frame_off
) > 32767))
27627 int sel
= SAVRES_SAVE
| SAVRES_VR
;
27628 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27630 if (using_static_chain_p
27631 && ptr_regno
== STATIC_CHAIN_REGNUM
)
27633 if (REGNO (frame_reg_rtx
) != ptr_regno
)
27634 START_USE (ptr_regno
);
27635 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27636 frame_reg_rtx
= ptr_reg
;
27637 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27638 frame_off
= -ptr_off
;
27640 else if (REGNO (frame_reg_rtx
) == 1)
27641 frame_off
= info
->total_size
;
27642 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27644 if (REGNO (frame_reg_rtx
) == 12)
27646 sp_off
= info
->total_size
;
27647 if (frame_reg_rtx
!= sp_reg_rtx
)
27648 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27651 /* Set frame pointer, if needed. */
27652 if (frame_pointer_needed
)
27654 insn
= emit_move_insn (gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
),
27656 RTX_FRAME_RELATED_P (insn
) = 1;
27659 /* Save AltiVec registers if needed. Save here because the red zone does
27660 not always include AltiVec registers. */
27661 if (!WORLD_SAVE_P (info
)
27662 && info
->altivec_size
!= 0 && (strategy
& SAVE_INLINE_VRS
) == 0)
27664 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
27669 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
27670 int scratch_regno
= ptr_regno_for_savres (SAVRES_SAVE
| SAVRES_VR
);
27671 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
27673 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
27675 if (scratch_regno
== 12)
27677 if (end_save
+ frame_off
!= 0)
27679 rtx offset
= GEN_INT (end_save
+ frame_off
);
27681 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27684 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27686 ptr_off
= -end_save
;
27687 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
27688 info
->altivec_save_offset
+ ptr_off
,
27689 0, V4SImode
, SAVRES_SAVE
| SAVRES_VR
);
27690 rs6000_frame_related (insn
, scratch_reg
, sp_off
- ptr_off
,
27691 NULL_RTX
, NULL_RTX
);
27692 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
27694 /* The oddity mentioned above clobbered our frame reg. */
27695 emit_move_insn (frame_reg_rtx
, ptr_reg
);
27696 frame_off
= ptr_off
;
27699 else if (!WORLD_SAVE_P (info
)
27700 && info
->altivec_size
!= 0)
27704 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
27705 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
27707 rtx areg
, savereg
, mem
;
27708 HOST_WIDE_INT offset
;
27710 offset
= (info
->altivec_save_offset
+ frame_off
27711 + 16 * (i
- info
->first_altivec_reg_save
));
27713 savereg
= gen_rtx_REG (V4SImode
, i
);
27715 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
27717 mem
= gen_frame_mem (V4SImode
,
27718 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27719 GEN_INT (offset
)));
27720 insn
= emit_insn (gen_rtx_SET (mem
, savereg
));
27726 areg
= gen_rtx_REG (Pmode
, 0);
27727 emit_move_insn (areg
, GEN_INT (offset
));
	      /* AltiVec addressing mode is [reg+reg].  */
	      mem = gen_frame_mem (V4SImode,
				   gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	      /* Rather than emitting a generic move, force use of the stvx
		 instruction, which we always want on ISA 2.07 (power8)
		 systems.  In particular we don't want xxpermdi/stxvd2x for
		 little endian.  */
	      insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
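	      /* The resulting sequence is roughly (illustrative; the
		 register numbers and offset are examples, not fixed by
		 the ABI):

		     li    0,<offset>	# offset of this vector's save slot
		     stvx  20,31,0	# store v20 to frame_reg + r0

		 i.e. the offset is materialized in r0 and the store uses
		 the [reg+reg] form.  */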
27740 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27741 areg
, GEN_INT (offset
));
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */
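  /* A sketch of what emit_vrsave_prologue arranges (illustrative only;
     <slot> and <mask> are placeholders, VRSAVE is SPR 256):

	 mfspr  12,256		# read the caller's VRSAVE
	 stw    12,<slot>(1)	# save it in the frame
	 # or <mask> (the AltiVec regs used here) into r12
	 mtspr  256,12		# install the updated mask  */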
27752 if (!WORLD_SAVE_P (info
) && info
->vrsave_size
!= 0)
27754 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27755 be using r12 as frame_reg_rtx and r11 as the static chain
27756 pointer for nested functions. */
27757 int save_regno
= 12;
27758 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27759 && !using_static_chain_p
)
27761 else if (using_split_stack
|| REGNO (frame_reg_rtx
) == 12)
27764 if (using_static_chain_p
)
27767 NOT_INUSE (save_regno
);
27769 emit_vrsave_prologue (info
, save_regno
, frame_off
, frame_reg_rtx
);
27772 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27773 if (!TARGET_SINGLE_PIC_BASE
27774 && ((TARGET_TOC
&& TARGET_MINIMAL_TOC
27775 && !constant_pool_empty_p ())
27776 || (DEFAULT_ABI
== ABI_V4
27777 && (flag_pic
== 1 || (flag_pic
&& TARGET_SECURE_PLT
))
27778 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))))
27780 /* If emit_load_toc_table will use the link register, we need to save
27781 it. We use R12 for this purpose because emit_load_toc_table
27782 can use register 0. This allows us to use a plain 'blr' to return
27783 from the procedure more often. */
27784 int save_LR_around_toc_setup
= (TARGET_ELF
27785 && DEFAULT_ABI
== ABI_V4
27787 && ! info
->lr_save_p
27788 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
) > 0);
27789 if (save_LR_around_toc_setup
)
27791 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27792 rtx tmp
= gen_rtx_REG (Pmode
, 12);
27795 insn
= emit_move_insn (tmp
, lr
);
27796 RTX_FRAME_RELATED_P (insn
) = 1;
27798 rs6000_emit_load_toc_table (TRUE
);
27800 insn
= emit_move_insn (lr
, tmp
);
27801 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
27802 RTX_FRAME_RELATED_P (insn
) = 1;
27805 rs6000_emit_load_toc_table (TRUE
);
27809 if (!TARGET_SINGLE_PIC_BASE
27810 && DEFAULT_ABI
== ABI_DARWIN
27811 && flag_pic
&& crtl
->uses_pic_offset_table
)
27813 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27814 rtx src
= gen_rtx_SYMBOL_REF (Pmode
, MACHOPIC_FUNCTION_BASE_NAME
);
27816 /* Save and restore LR locally around this call (in R0). */
27817 if (!info
->lr_save_p
)
27818 emit_move_insn (gen_rtx_REG (Pmode
, 0), lr
);
27820 emit_insn (gen_load_macho_picbase (src
));
27822 emit_move_insn (gen_rtx_REG (Pmode
,
27823 RS6000_PIC_OFFSET_TABLE_REGNUM
),
27826 if (!info
->lr_save_p
)
27827 emit_move_insn (lr
, gen_rtx_REG (Pmode
, 0));
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behavior is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
    }
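  /* For the 64-bit ELFv2 layout the TOC save slot sits 24 bytes above the
     stack pointer (40 under ELFv1/AIX), so the store above is e.g.
     "std 2,24(1)".  This is just a note on the generated code, not an
     additional instruction.  */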
27851 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27852 if (using_split_stack
&& split_stack_arg_pointer_used_p ())
27853 emit_split_stack_prologue (info
, sp_adjust
, frame_off
, frame_reg_rtx
);
27856 /* Output .extern statements for the save/restore routines we use. */
27859 rs6000_output_savres_externs (FILE *file
)
27861 rs6000_stack_t
*info
= rs6000_stack_info ();
27863 if (TARGET_DEBUG_STACK
)
27864 debug_stack_info (info
);
27866 /* Write .extern for any function we will call to save and restore
27868 if (info
->first_fp_reg_save
< 64
27873 int regno
= info
->first_fp_reg_save
- 32;
27875 if ((info
->savres_strategy
& SAVE_INLINE_FPRS
) == 0)
27877 bool lr
= (info
->savres_strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27878 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27879 name
= rs6000_savres_routine_name (regno
, sel
);
27880 fprintf (file
, "\t.extern %s\n", name
);
27882 if ((info
->savres_strategy
& REST_INLINE_FPRS
) == 0)
27884 bool lr
= (info
->savres_strategy
27885 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
27886 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27887 name
= rs6000_savres_routine_name (regno
, sel
);
27888 fprintf (file
, "\t.extern %s\n", name
);
27893 /* Write function prologue. */
27896 rs6000_output_function_prologue (FILE *file
)
27898 if (!cfun
->is_thunk
)
27899 rs6000_output_savres_externs (file
);
27901 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27902 immediately after the global entry point label. */
27903 if (rs6000_global_entry_point_needed_p ())
27905 const char *name
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
27907 (*targetm
.asm_out
.internal_label
) (file
, "LCF", rs6000_pic_labelno
);
27909 if (TARGET_CMODEL
!= CMODEL_LARGE
)
	  /* In the small and medium code models, we assume the TOC is less
	     than 2 GB away from the text section, so it can be computed via
	     the following two-instruction sequence.  */
27916 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
27917 fprintf (file
, "0:\taddis 2,12,.TOC.-");
27918 assemble_name (file
, buf
);
27919 fprintf (file
, "@ha\n");
27920 fprintf (file
, "\taddi 2,2,.TOC.-");
27921 assemble_name (file
, buf
);
27922 fprintf (file
, "@l\n");
	  /* In the large code model, we allow arbitrary offsets between the
	     TOC and the text section, so we have to load the offset from
	     memory.  The data field is emitted directly before the global
	     entry point in rs6000_elf_declare_function_name.  */
27932 #ifdef HAVE_AS_ENTRY_MARKERS
	  /* If supported by the linker, emit a marker relocation.  If the
	     total code size of the final executable or shared library
	     happens to fit into 2 GB after all, the linker will replace
	     this code sequence with the sequence for the small or medium
	     code models.  */
27938 fprintf (file
, "\t.reloc .,R_PPC64_ENTRY\n");
27940 fprintf (file
, "\tld 2,");
27941 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
27942 assemble_name (file
, buf
);
27943 fprintf (file
, "-");
27944 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
27945 assemble_name (file
, buf
);
27946 fprintf (file
, "(12)\n");
27947 fprintf (file
, "\tadd 2,2,12\n");
27950 fputs ("\t.localentry\t", file
);
27951 assemble_name (file
, name
);
27952 fputs (",.-", file
);
27953 assemble_name (file
, name
);
27954 fputs ("\n", file
);
27957 /* Output -mprofile-kernel code. This needs to be done here instead of
27958 in output_function_profile since it must go after the ELFv2 ABI
27959 local entry point. */
27960 if (TARGET_PROFILE_KERNEL
&& crtl
->profile
)
27962 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
27963 gcc_assert (!TARGET_32BIT
);
27965 asm_fprintf (file
, "\tmflr %s\n", reg_names
[0]);
      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
27970 if (DEFAULT_ABI
!= ABI_ELFv2
27971 && cfun
->static_chain_decl
!= NULL
)
27973 asm_fprintf (file
, "\tstd %s,24(%s)\n",
27974 reg_names
[STATIC_CHAIN_REGNUM
], reg_names
[1]);
27975 fprintf (file
, "\tbl %s\n", RS6000_MCOUNT
);
27976 asm_fprintf (file
, "\tld %s,24(%s)\n",
27977 reg_names
[STATIC_CHAIN_REGNUM
], reg_names
[1]);
27980 fprintf (file
, "\tbl %s\n", RS6000_MCOUNT
);
27983 rs6000_pic_labelno
++;
/* -mprofile-kernel code calls mcount before the function prolog,
   so a profiled leaf function should stay a leaf function.  */

static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}

/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
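/* Illustratively (register and offset are examples, not fixed values),
   the restore is the reverse of the prologue save:

       lwz     12,8(1)		# reload the saved CR word
       mtocrf  0x20,12		# move the relevant field(s) back into CR

   and the CFA notes added below pin CR's location to each of the two
   insns.  */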
static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	 CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	{
	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	{
	  rtx insn = emit_insn (gen_movsi_to_cr_one
				(gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	  /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	     CR field separately, attached to the insn that in fact
	     restores this particular CR field.  */
	  if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	    {
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
					   cfa_restores);
	  }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
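/* Worked example: under the 64-bit AIX/ELF ABIs the red zone is 288 bytes,
   so an offset of -300 from the stack pointer satisfies the predicate (the
   slot may be clobbered by a signal handler), while -200 does not.  */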
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
28187 /* Emit function epilogue as insns. */
28190 rs6000_emit_epilogue (int sibcall
)
28192 rs6000_stack_t
*info
;
28193 int restoring_GPRs_inline
;
28194 int restoring_FPRs_inline
;
28195 int using_load_multiple
;
28196 int using_mtcr_multiple
;
28197 int use_backchain_to_restore_sp
;
28200 HOST_WIDE_INT frame_off
= 0;
28201 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, 1);
28202 rtx frame_reg_rtx
= sp_reg_rtx
;
28203 rtx cfa_restores
= NULL_RTX
;
28205 rtx cr_save_reg
= NULL_RTX
;
28206 machine_mode reg_mode
= Pmode
;
28207 int reg_size
= TARGET_32BIT
? 4 : 8;
28208 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
28210 int fp_reg_size
= 8;
28213 unsigned ptr_regno
;
28215 info
= rs6000_stack_info ();
28217 strategy
= info
->savres_strategy
;
28218 using_load_multiple
= strategy
& REST_MULTIPLE
;
28219 restoring_FPRs_inline
= sibcall
|| (strategy
& REST_INLINE_FPRS
);
28220 restoring_GPRs_inline
= sibcall
|| (strategy
& REST_INLINE_GPRS
);
28221 using_mtcr_multiple
= (rs6000_cpu
== PROCESSOR_PPC601
28222 || rs6000_cpu
== PROCESSOR_PPC603
28223 || rs6000_cpu
== PROCESSOR_PPC750
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
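  /* With the back chain stored at offset 0 of the frame, the old stack
     pointer can be recovered with a single load ("ld 1,0(1)", or "lwz"
     for -m32) instead of materializing a large frame size with an
     addis/addi pair; that is the saving referred to above.  */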
28230 use_backchain_to_restore_sp
= (info
->total_size
+ (info
->lr_save_p
28231 ? info
->lr_save_offset
28233 || (cfun
->calls_alloca
28234 && !frame_pointer_needed
));
28235 restore_lr
= (info
->lr_save_p
28236 && (restoring_FPRs_inline
28237 || (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
))
28238 && (restoring_GPRs_inline
28239 || info
->first_fp_reg_save
< 64)
28240 && !cfun
->machine
->lr_is_wrapped_separately
);
28243 if (WORLD_SAVE_P (info
))
28247 const char *alloc_rname
;
      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */
28258 + 32 - info
->first_gp_reg_save
28259 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
28260 + 63 + 1 - info
->first_fp_reg_save
);
28262 strcpy (rname
, ((crtl
->calls_eh_return
) ?
28263 "*eh_rest_world_r10" : "*rest_world"));
28264 alloc_rname
= ggc_strdup (rname
);
28267 RTVEC_ELT (p
, j
++) = ret_rtx
;
28269 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
28270 /* The instruction pattern requires a clobber here;
28271 it is shared with the restVEC helper. */
28273 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
28276 /* CR register traditionally saved as CR2. */
28277 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
28279 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
28280 if (flag_shrink_wrap
)
28282 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
28283 gen_rtx_REG (Pmode
, LR_REGNO
),
28285 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28289 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
28291 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
28293 = gen_frame_load (reg
,
28294 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
28295 if (flag_shrink_wrap
28296 && save_reg_p (info
->first_gp_reg_save
+ i
))
28297 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28299 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
28301 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
28303 = gen_frame_load (reg
,
28304 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
28305 if (flag_shrink_wrap
28306 && save_reg_p (info
->first_altivec_reg_save
+ i
))
28307 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28309 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
28311 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
28312 ? DFmode
: SFmode
),
28313 info
->first_fp_reg_save
+ i
);
28315 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
28316 if (flag_shrink_wrap
28317 && save_reg_p (info
->first_fp_reg_save
+ i
))
28318 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28321 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
28323 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
28325 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
28327 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
28329 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
28330 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28332 if (flag_shrink_wrap
)
28334 REG_NOTES (insn
) = cfa_restores
;
28335 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28336 RTX_FRAME_RELATED_P (insn
) = 1;
28341 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28343 frame_off
= info
->total_size
;
28345 /* Restore AltiVec registers if we must do so before adjusting the
28347 if (info
->altivec_size
!= 0
28348 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28349 || (DEFAULT_ABI
!= ABI_V4
28350 && offset_below_red_zone_p (info
->altivec_save_offset
))))
28353 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28355 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
28356 if (use_backchain_to_restore_sp
)
28358 int frame_regno
= 11;
28360 if ((strategy
& REST_INLINE_VRS
) == 0)
28362 /* Of r11 and r12, select the one not clobbered by an
28363 out-of-line restore function for the frame register. */
28364 frame_regno
= 11 + 12 - scratch_regno
;
28366 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
28367 emit_move_insn (frame_reg_rtx
,
28368 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28371 else if (frame_pointer_needed
)
28372 frame_reg_rtx
= hard_frame_pointer_rtx
;
28374 if ((strategy
& REST_INLINE_VRS
) == 0)
28376 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28378 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28379 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28381 if (end_save
+ frame_off
!= 0)
28383 rtx offset
= GEN_INT (end_save
+ frame_off
);
28385 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28388 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28390 ptr_off
= -end_save
;
28391 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28392 info
->altivec_save_offset
+ ptr_off
,
28393 0, V4SImode
, SAVRES_VR
);
28397 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28398 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28400 rtx addr
, areg
, mem
, insn
;
28401 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28402 HOST_WIDE_INT offset
28403 = (info
->altivec_save_offset
+ frame_off
28404 + 16 * (i
- info
->first_altivec_reg_save
));
28406 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28408 mem
= gen_frame_mem (V4SImode
,
28409 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28410 GEN_INT (offset
)));
28411 insn
= gen_rtx_SET (reg
, mem
);
28415 areg
= gen_rtx_REG (Pmode
, 0);
28416 emit_move_insn (areg
, GEN_INT (offset
));
28418 /* AltiVec addressing mode is [reg+reg]. */
28419 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28420 mem
= gen_frame_mem (V4SImode
, addr
);
28422 /* Rather than emitting a generic move, force use of the
28423 lvx instruction, which we always want. In particular we
28424 don't want lxvd2x/xxpermdi for little endian. */
28425 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28428 (void) emit_insn (insn
);
28432 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28433 if (((strategy
& REST_INLINE_VRS
) == 0
28434 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28435 && (flag_shrink_wrap
28436 || (offset_below_red_zone_p
28437 (info
->altivec_save_offset
28438 + 16 * (i
- info
->first_altivec_reg_save
))))
28441 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28442 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28446 /* Restore VRSAVE if we must do so before adjusting the stack. */
28447 if (info
->vrsave_size
!= 0
28448 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28449 || (DEFAULT_ABI
!= ABI_V4
28450 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
28454 if (frame_reg_rtx
== sp_reg_rtx
)
28456 if (use_backchain_to_restore_sp
)
28458 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28459 emit_move_insn (frame_reg_rtx
,
28460 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28463 else if (frame_pointer_needed
)
28464 frame_reg_rtx
= hard_frame_pointer_rtx
;
28467 reg
= gen_rtx_REG (SImode
, 12);
28468 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28469 info
->vrsave_save_offset
+ frame_off
));
28471 emit_insn (generate_set_vrsave (reg
, info
, 1));
28475 /* If we have a large stack frame, restore the old stack pointer
28476 using the backchain. */
28477 if (use_backchain_to_restore_sp
)
28479 if (frame_reg_rtx
== sp_reg_rtx
)
28481 /* Under V.4, don't reset the stack pointer until after we're done
28482 loading the saved registers. */
28483 if (DEFAULT_ABI
== ABI_V4
)
28484 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28486 insn
= emit_move_insn (frame_reg_rtx
,
28487 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28490 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28491 && DEFAULT_ABI
== ABI_V4
)
28492 /* frame_reg_rtx has been set up by the altivec restore. */
28496 insn
= emit_move_insn (sp_reg_rtx
, frame_reg_rtx
);
28497 frame_reg_rtx
= sp_reg_rtx
;
28500 /* If we have a frame pointer, we can restore the old stack pointer
28502 else if (frame_pointer_needed
)
28504 frame_reg_rtx
= sp_reg_rtx
;
28505 if (DEFAULT_ABI
== ABI_V4
)
28506 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28507 /* Prevent reordering memory accesses against stack pointer restore. */
28508 else if (cfun
->calls_alloca
28509 || offset_below_red_zone_p (-info
->total_size
))
28510 rs6000_emit_stack_tie (frame_reg_rtx
, true);
28512 insn
= emit_insn (gen_add3_insn (frame_reg_rtx
, hard_frame_pointer_rtx
,
28513 GEN_INT (info
->total_size
)));
28516 else if (info
->push_p
28517 && DEFAULT_ABI
!= ABI_V4
28518 && !crtl
->calls_eh_return
)
28520 /* Prevent reordering memory accesses against stack pointer restore. */
28521 if (cfun
->calls_alloca
28522 || offset_below_red_zone_p (-info
->total_size
))
28523 rs6000_emit_stack_tie (frame_reg_rtx
, false);
28524 insn
= emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
,
28525 GEN_INT (info
->total_size
)));
28528 if (insn
&& frame_reg_rtx
== sp_reg_rtx
)
28532 REG_NOTES (insn
) = cfa_restores
;
28533 cfa_restores
= NULL_RTX
;
28535 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28536 RTX_FRAME_RELATED_P (insn
) = 1;
28539 /* Restore AltiVec registers if we have not done so already. */
28540 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28541 && info
->altivec_size
!= 0
28542 && (DEFAULT_ABI
== ABI_V4
28543 || !offset_below_red_zone_p (info
->altivec_save_offset
)))
28547 if ((strategy
& REST_INLINE_VRS
) == 0)
28549 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28551 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28552 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28553 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28555 if (end_save
+ frame_off
!= 0)
28557 rtx offset
= GEN_INT (end_save
+ frame_off
);
28559 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28562 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28564 ptr_off
= -end_save
;
28565 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28566 info
->altivec_save_offset
+ ptr_off
,
28567 0, V4SImode
, SAVRES_VR
);
28568 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
28570 /* Frame reg was clobbered by out-of-line save. Restore it
28571 from ptr_reg, and if we are calling out-of-line gpr or
28572 fpr restore set up the correct pointer and offset. */
28573 unsigned newptr_regno
= 1;
28574 if (!restoring_GPRs_inline
)
28576 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28577 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28578 newptr_regno
= ptr_regno_for_savres (sel
);
28579 end_save
= info
->gp_save_offset
+ info
->gp_size
;
28581 else if (!restoring_FPRs_inline
)
28583 bool lr
= !(strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
);
28584 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28585 newptr_regno
= ptr_regno_for_savres (sel
);
28586 end_save
= info
->fp_save_offset
+ info
->fp_size
;
28589 if (newptr_regno
!= 1 && REGNO (frame_reg_rtx
) != newptr_regno
)
28590 frame_reg_rtx
= gen_rtx_REG (Pmode
, newptr_regno
);
28592 if (end_save
+ ptr_off
!= 0)
28594 rtx offset
= GEN_INT (end_save
+ ptr_off
);
28596 frame_off
= -end_save
;
28598 emit_insn (gen_addsi3_carry (frame_reg_rtx
,
28601 emit_insn (gen_adddi3_carry (frame_reg_rtx
,
28606 frame_off
= ptr_off
;
28607 emit_move_insn (frame_reg_rtx
, ptr_reg
);
28613 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28614 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28616 rtx addr
, areg
, mem
, insn
;
28617 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28618 HOST_WIDE_INT offset
28619 = (info
->altivec_save_offset
+ frame_off
28620 + 16 * (i
- info
->first_altivec_reg_save
));
28622 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28624 mem
= gen_frame_mem (V4SImode
,
28625 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28626 GEN_INT (offset
)));
28627 insn
= gen_rtx_SET (reg
, mem
);
28631 areg
= gen_rtx_REG (Pmode
, 0);
28632 emit_move_insn (areg
, GEN_INT (offset
));
28634 /* AltiVec addressing mode is [reg+reg]. */
28635 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28636 mem
= gen_frame_mem (V4SImode
, addr
);
28638 /* Rather than emitting a generic move, force use of the
28639 lvx instruction, which we always want. In particular we
28640 don't want lxvd2x/xxpermdi for little endian. */
28641 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28644 (void) emit_insn (insn
);
28648 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28649 if (((strategy
& REST_INLINE_VRS
) == 0
28650 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28651 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28654 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28655 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28659 /* Restore VRSAVE if we have not done so already. */
28660 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28661 && info
->vrsave_size
!= 0
28662 && (DEFAULT_ABI
== ABI_V4
28663 || !offset_below_red_zone_p (info
->vrsave_save_offset
)))
28667 reg
= gen_rtx_REG (SImode
, 12);
28668 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28669 info
->vrsave_save_offset
+ frame_off
));
28671 emit_insn (generate_set_vrsave (reg
, info
, 1));
28674 /* If we exit by an out-of-line restore function on ABI_V4 then that
28675 function will deallocate the stack, so we don't need to worry
28676 about the unwinder restoring cr from an invalid stack frame
28678 exit_func
= (!restoring_FPRs_inline
28679 || (!restoring_GPRs_inline
28680 && info
->first_fp_reg_save
== 64));
28682 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28683 *separate* slots if the routine calls __builtin_eh_return, so
28684 that they can be independently restored by the unwinder. */
28685 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
28687 int i
, cr_off
= info
->ehcr_offset
;
28689 for (i
= 0; i
< 8; i
++)
28690 if (!call_used_regs
[CR0_REGNO
+ i
])
28692 rtx reg
= gen_rtx_REG (SImode
, 0);
28693 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28694 cr_off
+ frame_off
));
28696 insn
= emit_insn (gen_movsi_to_cr_one
28697 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
28699 if (!exit_func
&& flag_shrink_wrap
)
28701 add_reg_note (insn
, REG_CFA_RESTORE
,
28702 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
28704 RTX_FRAME_RELATED_P (insn
) = 1;
28707 cr_off
+= reg_size
;
28711 /* Get the old lr if we saved it. If we are restoring registers
28712 out-of-line, then the out-of-line routines can do this for us. */
28713 if (restore_lr
&& restoring_GPRs_inline
)
28714 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28716 /* Get the old cr if we saved it. */
28717 if (info
->cr_save_p
)
28719 unsigned cr_save_regno
= 12;
28721 if (!restoring_GPRs_inline
)
28723 /* Ensure we don't use the register used by the out-of-line
28724 gpr register restore below. */
28725 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28726 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28727 int gpr_ptr_regno
= ptr_regno_for_savres (sel
);
28729 if (gpr_ptr_regno
== 12)
28730 cr_save_regno
= 11;
28731 gcc_checking_assert (REGNO (frame_reg_rtx
) != cr_save_regno
);
28733 else if (REGNO (frame_reg_rtx
) == 12)
28734 cr_save_regno
= 11;
28736 cr_save_reg
= load_cr_save (cr_save_regno
, frame_reg_rtx
,
28737 info
->cr_save_offset
+ frame_off
,
28741 /* Set LR here to try to overlap restores below. */
28742 if (restore_lr
&& restoring_GPRs_inline
)
28743 restore_saved_lr (0, exit_func
);
28745 /* Load exception handler data registers, if needed. */
28746 if (crtl
->calls_eh_return
)
28748 unsigned int i
, regno
;
28752 rtx reg
= gen_rtx_REG (reg_mode
, 2);
28753 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28754 frame_off
+ RS6000_TOC_SAVE_SLOT
));
28761 regno
= EH_RETURN_DATA_REGNO (i
);
28762 if (regno
== INVALID_REGNUM
)
28765 mem
= gen_frame_mem_offset (reg_mode
, frame_reg_rtx
,
28766 info
->ehrd_offset
+ frame_off
28767 + reg_size
* (int) i
);
28769 emit_move_insn (gen_rtx_REG (reg_mode
, regno
), mem
);
28773 /* Restore GPRs. This is done as a PARALLEL if we are using
28774 the load-multiple instructions. */
28775 if (!restoring_GPRs_inline
)
28777 /* We are jumping to an out-of-line function. */
28779 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
28780 bool can_use_exit
= end_save
== 0;
28781 int sel
= SAVRES_GPR
| (can_use_exit
? SAVRES_LR
: 0);
28784 /* Emit stack reset code if we need it. */
28785 ptr_regno
= ptr_regno_for_savres (sel
);
28786 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
28788 rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28789 else if (end_save
+ frame_off
!= 0)
28790 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
,
28791 GEN_INT (end_save
+ frame_off
)));
28792 else if (REGNO (frame_reg_rtx
) != ptr_regno
)
28793 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28794 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28795 frame_off
= -end_save
;
28797 if (can_use_exit
&& info
->cr_save_p
)
28798 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, true);
28800 ptr_off
= -end_save
;
28801 rs6000_emit_savres_rtx (info
, ptr_reg
,
28802 info
->gp_save_offset
+ ptr_off
,
28803 info
->lr_save_offset
+ ptr_off
,
28806 else if (using_load_multiple
)
28809 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
28810 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
28812 = gen_frame_load (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
28814 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
28815 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28819 int offset
= info
->gp_save_offset
+ frame_off
;
28820 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28823 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28825 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28826 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28829 offset
+= reg_size
;
28833 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
	  /* If the frame pointer was used then we can't delay emitting
	     a REG_CFA_DEF_CFA note.  This must happen on the insn that
	     restores the frame pointer, r31.  We may have already emitted
	     a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	     discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	     be harmless if emitted.  */
28841 if (frame_pointer_needed
)
28843 insn
= get_last_insn ();
28844 add_reg_note (insn
, REG_CFA_DEF_CFA
,
28845 plus_constant (Pmode
, frame_reg_rtx
, frame_off
));
28846 RTX_FRAME_RELATED_P (insn
) = 1;
	  /* Set up cfa_restores.  We always need these when
	     shrink-wrapping.  If not shrink-wrapping then we only need
	     the cfa_restore when the stack location is no longer valid.
	     The cfa_restores must be emitted on or before the insn that
	     invalidates the stack, and of course must not be emitted
	     before the insn that actually does the restore.  The latter
	     is why it is a bad idea to emit the cfa_restores as a group
	     on the last instruction here that actually does a restore:
	     that insn may be reordered with respect to others doing
	     restores.  */
28859 if (flag_shrink_wrap
28860 && !restoring_GPRs_inline
28861 && info
->first_fp_reg_save
== 64)
28862 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28864 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28866 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28868 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28869 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28873 if (!restoring_GPRs_inline
28874 && info
->first_fp_reg_save
== 64)
28876 /* We are jumping to an out-of-line function. */
28878 emit_cfa_restores (cfa_restores
);
28882 if (restore_lr
&& !restoring_GPRs_inline
)
28884 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28885 restore_saved_lr (0, exit_func
);
28888 /* Restore fpr's if we need to do it without calling a function. */
28889 if (restoring_FPRs_inline
)
28891 int offset
= info
->fp_save_offset
+ frame_off
;
28892 for (i
= info
->first_fp_reg_save
; i
< 64; i
++)
28895 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
28897 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
28898 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28899 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28900 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
28904 offset
+= fp_reg_size
;
28908 /* If we saved cr, restore it here. Just those that were used. */
28909 if (info
->cr_save_p
)
28910 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, exit_func
);
28912 /* If this is V.4, unwind the stack pointer after all of the loads
28913 have been done, or set up r11 if we are restoring fp out of line. */
28915 if (!restoring_FPRs_inline
)
28917 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28918 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28919 ptr_regno
= ptr_regno_for_savres (sel
);
28922 insn
= rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28923 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28926 if (insn
&& restoring_FPRs_inline
)
28930 REG_NOTES (insn
) = cfa_restores
;
28931 cfa_restores
= NULL_RTX
;
28933 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28934 RTX_FRAME_RELATED_P (insn
) = 1;
28937 if (crtl
->calls_eh_return
)
28939 rtx sa
= EH_RETURN_STACKADJ_RTX
;
28940 emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
, sa
));
28943 if (!sibcall
&& restoring_FPRs_inline
)
	  /* We can't hang the cfa_restores off a simple return,
	     since the shrink-wrap code sometimes uses an existing
	     return.  This means there might be a path from
	     pre-prologue code to this return, and dwarf2cfi code
	     wants the eh_frame unwinder state to be the same on
	     all paths to any point.  So we need to emit the
	     cfa_restores before the return.  For -m64 we really
	     don't need epilogue cfa_restores at all, except for
	     this irritating dwarf2cfi with shrink-wrap
	     requirement; the stack red-zone means eh_frame info
	     from the prologue telling the unwinder to restore
	     from the stack is perfectly good right to the end of
	     the function.  */
28960 emit_insn (gen_blockage ());
28961 emit_cfa_restores (cfa_restores
);
28962 cfa_restores
= NULL_RTX
;
28965 emit_jump_insn (targetm
.gen_simple_return ());
28968 if (!sibcall
&& !restoring_FPRs_inline
)
28970 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28971 rtvec p
= rtvec_alloc (3 + !!lr
+ 64 - info
->first_fp_reg_save
);
28973 RTVEC_ELT (p
, elt
++) = ret_rtx
;
28975 RTVEC_ELT (p
, elt
++)
28976 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
28978 /* We have to restore more than two FP registers, so branch to the
28979 restore function. It will return to our caller. */
28984 if (flag_shrink_wrap
)
28985 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28987 sym
= rs6000_savres_routine_sym (info
, SAVRES_FPR
| (lr
? SAVRES_LR
: 0));
28988 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, sym
);
28989 reg
= (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)? 1 : 11;
28990 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, reg
));
28992 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
28994 rtx reg
= gen_rtx_REG (DFmode
, info
->first_fp_reg_save
+ i
);
28996 RTVEC_ELT (p
, elt
++)
28997 = gen_frame_load (reg
, sp_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
28998 if (flag_shrink_wrap
28999 && save_reg_p (info
->first_fp_reg_save
+ i
))
29000 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
29003 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
29009 /* Ensure the cfa_restores are hung off an insn that won't
29010 be reordered above other restores. */
29011 emit_insn (gen_blockage ());
29013 emit_cfa_restores (cfa_restores
);
29017 /* Write function epilogue. */
29020 rs6000_output_function_epilogue (FILE *file
)
29023 macho_branch_islands ();
29026 rtx_insn
*insn
= get_last_insn ();
29027 rtx_insn
*deleted_debug_label
= NULL
;
29029 /* Mach-O doesn't support labels at the end of objects, so if
29030 it looks like we might want one, take special action.
29032 First, collect any sequence of deleted debug labels. */
29035 && NOTE_KIND (insn
) != NOTE_INSN_DELETED_LABEL
)
29037 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
29038 notes only, instead set their CODE_LABEL_NUMBER to -1,
29039 otherwise there would be code generation differences
29040 in between -g and -g0. */
29041 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
29042 deleted_debug_label
= insn
;
29043 insn
= PREV_INSN (insn
);
29046 /* Second, if we have:
29049 then this needs to be detected, so skip past the barrier. */
29051 if (insn
&& BARRIER_P (insn
))
29052 insn
= PREV_INSN (insn
);
29054 /* Up to now we've only seen notes or barriers. */
29059 && NOTE_KIND (insn
) == NOTE_INSN_DELETED_LABEL
))
29060 /* Trailing label: <barrier>. */
29061 fputs ("\tnop\n", file
);
29064 /* Lastly, see if we have a completely empty function body. */
29065 while (insn
&& ! INSN_P (insn
))
29066 insn
= PREV_INSN (insn
);
  /* If we don't find any insns, we've got an empty function body;
     i.e. completely empty - without a return or branch.  This is
     taken as the case where a function body has been removed
     because it contains an inline __builtin_unreachable().  GCC
     states that reaching __builtin_unreachable() means UB so we're
     not obliged to do anything special; however, we want
     non-zero-sized function bodies.  To meet this, and help the
     user out, let's trap the case.  */
29076 fputs ("\ttrap\n", file
);
29079 else if (deleted_debug_label
)
29080 for (insn
= deleted_debug_label
; insn
; insn
= NEXT_INSN (insn
))
29081 if (NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
29082 CODE_LABEL_NUMBER (insn
) = -1;
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
29100 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
29101 && ! flag_inhibit_size_directive
29102 && rs6000_traceback
!= traceback_none
&& !cfun
->is_thunk
)
29104 const char *fname
= NULL
;
29105 const char *language_string
= lang_hooks
.name
;
29106 int fixed_parms
= 0, float_parms
= 0, parm_info
= 0;
29108 int optional_tbtab
;
29109 rs6000_stack_t
*info
= rs6000_stack_info ();
29111 if (rs6000_traceback
== traceback_full
)
29112 optional_tbtab
= 1;
29113 else if (rs6000_traceback
== traceback_part
)
29114 optional_tbtab
= 0;
29116 optional_tbtab
= !optimize_size
&& !TARGET_ELF
;
29118 if (optional_tbtab
)
29120 fname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
29121 while (*fname
== '.') /* V.4 encodes . in the name */
29124 /* Need label immediately before tbtab, so we can compute
29125 its offset from the function start. */
29126 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
29127 ASM_OUTPUT_LABEL (file
, fname
);
      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
	 pseudo-op at all.  */
29138 /* An all-zero word flags the start of the tbtab, for debuggers
29139 that have to find it by searching forward from the entry
29140 point or from the current pc. */
29141 fputs ("\t.long 0\n", file
);
29143 /* Tbtab format type. Use format type 0. */
29144 fputs ("\t.byte 0,", file
);
29146 /* Language type. Unfortunately, there does not seem to be any
29147 official way to discover the language being compiled, so we
29148 use language_string.
29149 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
29150 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29151 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
29152 either, so for now use 0. */
29154 || ! strcmp (language_string
, "GNU GIMPLE")
29155 || ! strcmp (language_string
, "GNU Go")
29156 || ! strcmp (language_string
, "libgccjit"))
29158 else if (! strcmp (language_string
, "GNU F77")
29159 || lang_GNU_Fortran ())
29161 else if (! strcmp (language_string
, "GNU Pascal"))
29163 else if (! strcmp (language_string
, "GNU Ada"))
29165 else if (lang_GNU_CXX ()
29166 || ! strcmp (language_string
, "GNU Objective-C++"))
29168 else if (! strcmp (language_string
, "GNU Java"))
29170 else if (! strcmp (language_string
, "GNU Objective-C"))
29173 gcc_unreachable ();
29174 fprintf (file
, "%d,", i
);
29176 /* 8 single bit fields: global linkage (not set for C extern linkage,
29177 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29178 from start of procedure stored in tbtab, internal function, function
29179 has controlled storage, function has no toc, function uses fp,
29180 function logs/aborts fp operations. */
29181 /* Assume that fp operations are used if any fp reg must be saved. */
29182 fprintf (file
, "%d,",
29183 (optional_tbtab
<< 5) | ((info
->first_fp_reg_save
!= 64) << 1));
29185 /* 6 bitfields: function is interrupt handler, name present in
29186 proc table, function calls alloca, on condition directives
29187 (controls stack walks, 3 bits), saves condition reg, saves
29189 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29190 set up as a frame pointer, even when there is no alloca call. */
29191 fprintf (file
, "%d,",
29192 ((optional_tbtab
<< 6)
29193 | ((optional_tbtab
& frame_pointer_needed
) << 5)
29194 | (info
->cr_save_p
<< 1)
29195 | (info
->lr_save_p
)));
29197 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29199 fprintf (file
, "%d,",
29200 (info
->push_p
<< 7) | (64 - info
->first_fp_reg_save
));
29202 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29203 fprintf (file
, "%d,", (32 - first_reg_to_save ()));
29205 if (optional_tbtab
)
29207 /* Compute the parameter info from the function decl argument
29210 int next_parm_info_bit
= 31;
29212 for (decl
= DECL_ARGUMENTS (current_function_decl
);
29213 decl
; decl
= DECL_CHAIN (decl
))
29215 rtx parameter
= DECL_INCOMING_RTL (decl
);
29216 machine_mode mode
= GET_MODE (parameter
);
29218 if (GET_CODE (parameter
) == REG
)
29220 if (SCALAR_FLOAT_MODE_P (mode
))
29243 gcc_unreachable ();
29246 /* If only one bit will fit, don't or in this entry. */
29247 if (next_parm_info_bit
> 0)
29248 parm_info
|= (bits
<< (next_parm_info_bit
- 1));
29249 next_parm_info_bit
-= 2;
29253 fixed_parms
+= ((GET_MODE_SIZE (mode
)
29254 + (UNITS_PER_WORD
- 1))
29256 next_parm_info_bit
-= 1;
29262 /* Number of fixed point parameters. */
29263 /* This is actually the number of words of fixed point parameters; thus
29264 an 8 byte struct counts as 2; and thus the maximum value is 8. */
29265 fprintf (file
, "%d,", fixed_parms
);
29267 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29269 /* This is actually the number of fp registers that hold parameters;
29270 and thus the maximum value is 13. */
29271 /* Set parameters on stack bit if parameters are not in their original
29272 registers, regardless of whether they are on the stack? Xlc
29273 seems to set the bit when not optimizing. */
29274 fprintf (file
, "%d\n", ((float_parms
<< 1) | (! optimize
)));
29276 if (optional_tbtab
)
29278 /* Optional fields follow. Some are variable length. */
29280 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29281 float, 11 double float. */
29282 /* There is an entry for each parameter in a register, in the order
29283 that they occur in the parameter list. Any intervening arguments
29284 on the stack are ignored. If the list overflows a long (max
29285 possible length 34 bits) then completely leave off all elements
29287 /* Only emit this long if there was at least one parameter. */
29288 if (fixed_parms
|| float_parms
)
29289 fprintf (file
, "\t.long %d\n", parm_info
);
29291 /* Offset from start of code to tb table. */
29292 fputs ("\t.long ", file
);
29293 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
29294 RS6000_OUTPUT_BASENAME (file
, fname
);
29296 rs6000_output_function_entry (file
, fname
);
29299 /* Interrupt handler mask. */
29300 /* Omit this long, since we never set the interrupt handler bit
29303 /* Number of CTL (controlled storage) anchors. */
29304 /* Omit this long, since the has_ctl bit is never set above. */
29306 /* Displacement into stack of each CTL anchor. */
29307 /* Omit this list of longs, because there are no CTL anchors. */
29309 /* Length of function name. */
29312 fprintf (file
, "\t.short %d\n", (int) strlen (fname
));
29314 /* Function name. */
29315 assemble_string (fname
, strlen (fname
));
29317 /* Register for alloca automatic storage; this is always reg 31.
29318 Only emit this if the alloca bit was set above. */
29319 if (frame_pointer_needed
)
29320 fputs ("\t.byte 31\n", file
);
29322 fputs ("\t.align 2\n", file
);
29326 /* Arrange to define .LCTOC1 label, if not already done. */
29330 if (!toc_initialized
)
29332 switch_to_section (toc_section
);
29333 switch_to_section (current_function_section ());
/* -fsplit-stack support.  */

/* A SYMBOL_REF for __morestack.  */
static GTY(()) rtx morestack_ref;

static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
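/* gen_add3_const is a small helper used by the split-stack prologue
   below: it builds an add of a compile-time constant in the width of the
   target word, which is what lets the linker later rewrite the pair of
   stack-adjustment instructions for non-split-stack callers.  */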
29352 /* Emit -fsplit-stack prologue, which goes before the regular function
29353 prologue (at local entry point in the case of ELFv2). */
29356 rs6000_expand_split_stack_prologue (void)
29358 rs6000_stack_t
*info
= rs6000_stack_info ();
29359 unsigned HOST_WIDE_INT allocate
;
29360 long alloc_hi
, alloc_lo
;
29361 rtx r0
, r1
, r12
, lr
, ok_label
, compare
, jump
, call_fusage
;
29364 gcc_assert (flag_split_stack
&& reload_completed
);
29369 if (global_regs
[29])
29371 error ("%qs uses register r29", "-fsplit-stack");
29372 inform (DECL_SOURCE_LOCATION (global_regs_decl
[29]),
29373 "conflicts with %qD", global_regs_decl
[29]);
29376 allocate
= info
->total_size
;
29377 if (allocate
> (unsigned HOST_WIDE_INT
) 1 << 31)
29379 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29382 if (morestack_ref
== NULL_RTX
)
29384 morestack_ref
= gen_rtx_SYMBOL_REF (Pmode
, "__morestack");
29385 SYMBOL_REF_FLAGS (morestack_ref
) |= (SYMBOL_FLAG_LOCAL
29386 | SYMBOL_FLAG_FUNCTION
);
29389 r0
= gen_rtx_REG (Pmode
, 0);
29390 r1
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
29391 r12
= gen_rtx_REG (Pmode
, 12);
29392 emit_insn (gen_load_split_stack_limit (r0
));
29393 /* Always emit two insns here to calculate the requested stack,
29394 so that the linker can edit them when adjusting size for calling
29395 non-split-stack code. */
29396 alloc_hi
= (-allocate
+ 0x8000) & ~0xffffL
;
29397 alloc_lo
= -allocate
- alloc_hi
;
29400 emit_insn (gen_add3_const (r12
, r1
, alloc_hi
));
29402 emit_insn (gen_add3_const (r12
, r12
, alloc_lo
));
29404 emit_insn (gen_nop ());
29408 emit_insn (gen_add3_const (r12
, r1
, alloc_lo
));
29409 emit_insn (gen_nop ());
29412 compare
= gen_rtx_REG (CCUNSmode
, CR7_REGNO
);
29413 emit_insn (gen_rtx_SET (compare
, gen_rtx_COMPARE (CCUNSmode
, r12
, r0
)));
29414 ok_label
= gen_label_rtx ();
29415 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
29416 gen_rtx_GEU (VOIDmode
, compare
, const0_rtx
),
29417 gen_rtx_LABEL_REF (VOIDmode
, ok_label
),
29419 insn
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
29420 JUMP_LABEL (insn
) = ok_label
;
29421 /* Mark the jump as very likely to be taken. */
29422 add_reg_br_prob_note (insn
, profile_probability::very_likely ());
29424 lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
29425 insn
= emit_move_insn (r0
, lr
);
29426 RTX_FRAME_RELATED_P (insn
) = 1;
29427 insn
= emit_insn (gen_frame_store (r0
, r1
, info
->lr_save_offset
));
29428 RTX_FRAME_RELATED_P (insn
) = 1;
29430 insn
= emit_call_insn (gen_call (gen_rtx_MEM (SImode
, morestack_ref
),
29431 const0_rtx
, const0_rtx
));
29432 call_fusage
= NULL_RTX
;
29433 use_reg (&call_fusage
, r12
);
29434 /* Say the call uses r0, even though it doesn't, to stop regrename
29435 from twiddling with the insns saving lr, trashing args for cfun.
29436 The insns restoring lr are similarly protected by making
29437 split_stack_return use r0. */
29438 use_reg (&call_fusage
, r0
);
29439 add_function_usage_to (insn
, call_fusage
);
29440 /* Indicate that this function can't jump to non-local gotos. */
29441 make_reg_eh_region_note_nothrow_nononlocal (insn
);
29442 emit_insn (gen_frame_load (r0
, r1
, info
->lr_save_offset
));
29443 insn
= emit_move_insn (lr
, r0
);
29444 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
29445 RTX_FRAME_RELATED_P (insn
) = 1;
29446 emit_insn (gen_split_stack_return ());
29448 emit_label (ok_label
);
29449 LABEL_NUSES (ok_label
) = 1;
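/* A rough sketch of what the code above emits for a 64-bit function
   whose frame is a bit over 64KiB (65680 bytes); mnemonics are real
   but register choices other than r0/r1/r12/cr7 and the TCB offset of
   the stack-limit word are illustrative, not exact:

	ld	0,-28688(13)	# load split-stack limit from the TCB
	addis	12,1,-1		# alloc_hi: linker-editable pair that
	addi	12,12,-144	# alloc_lo: computes r1 - 65680
	cmpld	7,12,0		# compare requested sp against the limit
	bge	7,.Lok		# very likely: enough stack, carry on
	mflr	0
	std	0,16(1)		# save LR in its regular slot
	bl	__morestack	# grab a new stack segment
	ld	0,16(1)
	mtlr	0
	blr			# split_stack_return
   .Lok:

   The addis/addi pair is always emitted, even when one half is zero,
   so the linker can rewrite the frame size when a split-stack function
   calls non-split-stack code.  */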
/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
      && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
	  == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
	{
	  rtx pat;

	  cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
	  REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

	  /* Put the pseudo initialization right after the note at the
	     beginning of the function.  */
	  pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
			     gen_rtx_REG (Pmode, 12));
	  push_topmost_sequence ();
	  emit_insn_after (pat, get_insns ());
	  pop_topmost_sequence ();
	}
      return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
			    FIRST_PARM_OFFSET (current_function_decl));
    }
  return virtual_incoming_args_rtx;
}
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */

void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
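/* Illustrative example (not part of the implementation): for

	struct A { virtual void f (); long a; };
	struct B { virtual void g (); long b; };
	struct C : A, B { void g (); };

   on a 64-bit target the B subobject lives at offset 16 inside C, so
   the B-in-C vtable needs a thunk for C::g that retreats `this' back
   to the start of C.  With DELTA == -16 and no VCALL_OFFSET, the code
   below emits roughly

	addi 3,3,-16		# adjust the incoming `this' pointer (r3)
	b <C::g>		# pure tail jump, LR untouched

   Register numbers follow the ELF ABIs where `this' arrives in r3;
   the real output uses the mangled name of C::g.  */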
29559 rs6000_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
29560 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
29563 rtx this_rtx
, funexp
;
29566 reload_completed
= 1;
29567 epilogue_completed
= 1;
29569 /* Mark the end of the (empty) prologue. */
29570 emit_note (NOTE_INSN_PROLOGUE_END
);
29572 /* Find the "this" pointer. If the function returns a structure,
29573 the structure return pointer is in r3. */
29574 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
29575 this_rtx
= gen_rtx_REG (Pmode
, 4);
29577 this_rtx
= gen_rtx_REG (Pmode
, 3);
29579 /* Apply the constant offset, if required. */
29581 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, GEN_INT (delta
)));
29583 /* Apply the offset from the vtable, if required. */
29586 rtx vcall_offset_rtx
= GEN_INT (vcall_offset
);
29587 rtx tmp
= gen_rtx_REG (Pmode
, 12);
29589 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, this_rtx
));
29590 if (((unsigned HOST_WIDE_INT
) vcall_offset
) + 0x8000 >= 0x10000)
29592 emit_insn (gen_add3_insn (tmp
, tmp
, vcall_offset_rtx
));
29593 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
29597 rtx loc
= gen_rtx_PLUS (Pmode
, tmp
, vcall_offset_rtx
);
29599 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, loc
));
29601 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, tmp
));
29604 /* Generate a tail call to the target function. */
29605 if (!TREE_USED (function
))
29607 assemble_external (function
);
29608 TREE_USED (function
) = 1;
29610 funexp
= XEXP (DECL_RTL (function
), 0);
29611 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
29614 if (MACHOPIC_INDIRECT
)
29615 funexp
= machopic_indirect_call_target (funexp
);
29618 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29619 generate sibcall RTL explicitly. */
29620 insn
= emit_call_insn (
29621 gen_rtx_PARALLEL (VOIDmode
,
29623 gen_rtx_CALL (VOIDmode
,
29624 funexp
, const0_rtx
),
29625 gen_rtx_USE (VOIDmode
, const0_rtx
),
29626 simple_return_rtx
)));
29627 SIBLING_CALL_P (insn
) = 1;
29630 /* Run just enough of rest_of_compilation to get the insns emitted.
29631 There's not really enough bulk here to make other passes such as
29632 instruction scheduling worth while. Note that use_thunk calls
29633 assemble_start_function and assemble_end_function. */
29634 insn
= get_insns ();
29635 shorten_branches (insn
);
29636 final_start_function (insn
, file
, 1);
29637 final (insn
, file
, 1);
29638 final_end_function ();
29640 reload_completed
= 0;
29641 epilogue_completed
= 0;
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target       Flags           Name              One table per
   AIX          (none)          AIX TOC           object file
   AIX          -mfull-toc      AIX TOC           object file
   AIX          -mminimal-toc   AIX minimal TOC   translation unit
   SVR4/EABI    (none)          SVR4 SDATA        object file
   SVR4/EABI    -fpic           SVR4 pic          object file
   SVR4/EABI    -fPIC           SVR4 PIC          translation unit
   SVR4/EABI    -mrelocatable   EABI TOC          function
   SVR4/EABI    -maix           AIX TOC           object file
   SVR4/EABI    -maix -mminimal-toc
                                AIX minimal TOC   translation unit

   Name              Reg.   Set by   entries    contains:
                             made by  addrs?     fp?      sum?

   AIX TOC           2      crt0     as         Y        option   option
   AIX minimal TOC   30     prolog   gcc        Y        Y        option
   SVR4 SDATA        13     crt0     gcc        N        Y        N
   SVR4 pic          30     prolog   ld         Y        not yet  N
   SVR4 PIC          30     prolog   gcc        Y        option   option
   EABI TOC          30     prolog   gcc        Y        option   option  */
29671 /* Hash functions for the hash table. */
29674 rs6000_hash_constant (rtx k
)
29676 enum rtx_code code
= GET_CODE (k
);
29677 machine_mode mode
= GET_MODE (k
);
29678 unsigned result
= (code
<< 3) ^ mode
;
29679 const char *format
;
29682 format
= GET_RTX_FORMAT (code
);
29683 flen
= strlen (format
);
29689 return result
* 1231 + (unsigned) INSN_UID (XEXP (k
, 0));
29691 case CONST_WIDE_INT
:
29694 flen
= CONST_WIDE_INT_NUNITS (k
);
29695 for (i
= 0; i
< flen
; i
++)
29696 result
= result
* 613 + CONST_WIDE_INT_ELT (k
, i
);
29701 if (mode
!= VOIDmode
)
29702 return real_hash (CONST_DOUBLE_REAL_VALUE (k
)) * result
;
29714 for (; fidx
< flen
; fidx
++)
29715 switch (format
[fidx
])
29720 const char *str
= XSTR (k
, fidx
);
29721 len
= strlen (str
);
29722 result
= result
* 613 + len
;
29723 for (i
= 0; i
< len
; i
++)
29724 result
= result
* 613 + (unsigned) str
[i
];
29729 result
= result
* 1231 + rs6000_hash_constant (XEXP (k
, fidx
));
29733 result
= result
* 613 + (unsigned) XINT (k
, fidx
);
29736 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT
))
29737 result
= result
* 613 + (unsigned) XWINT (k
, fidx
);
29741 for (i
= 0; i
< sizeof (HOST_WIDE_INT
) / sizeof (unsigned); i
++)
29742 result
= result
* 613 + (unsigned) (XWINT (k
, fidx
)
29749 gcc_unreachable ();
29756 toc_hasher::hash (toc_hash_struct
*thc
)
29758 return rs6000_hash_constant (thc
->key
) ^ thc
->key_mode
;
29761 /* Compare H1 and H2 for equivalence. */
29764 toc_hasher::equal (toc_hash_struct
*h1
, toc_hash_struct
*h2
)
29769 if (h1
->key_mode
!= h2
->key_mode
)
29772 return rtx_equal_p (r1
, r2
);
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
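/* For reference, the prefixes above correspond to the Itanium C++ ABI
   mangling (hypothetical class C used for illustration):

     _ZTV1C   vtable for C			(matches "_ZTV")
     _ZTT1C   VTT for C				(matches "_ZTT")
     _ZTI1C   typeinfo for C			(matches "_ZTI")
     _ZTC...  construction vtable		(matches "_ZTC")
     _vt.C    old g++ 2.x vtable naming		(matches "_vt.")

   Anything else is treated as an ordinary symbol.  */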
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
29818 rs6000_output_symbol_ref (FILE *file
, rtx x
)
29820 const char *name
= XSTR (x
, 0);
29822 /* Currently C++ toc references to vtables can be emitted before it
29823 is decided whether the vtable is public or private. If this is
29824 the case, then the linker will eventually complain that there is
29825 a reference to an unknown section. Thus, for vtables only,
29826 we emit the TOC reference to reference the identifier and not the
29828 if (VTABLE_NAME_P (name
))
29830 RS6000_OUTPUT_BASENAME (file
, name
);
29833 assemble_name (file
, name
);
29836 /* Output a TOC entry. We derive the entry name from what is being
29840 output_toc (FILE *file
, rtx x
, int labelno
, machine_mode mode
)
29843 const char *name
= buf
;
29845 HOST_WIDE_INT offset
= 0;
29847 gcc_assert (!TARGET_NO_TOC
);
29849 /* When the linker won't eliminate them, don't output duplicate
29850 TOC entries (this happens on AIX if there is any kind of TOC,
29851 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29853 if (TARGET_TOC
&& GET_CODE (x
) != LABEL_REF
)
29855 struct toc_hash_struct
*h
;
29857 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29858 time because GGC is not initialized at that point. */
29859 if (toc_hash_table
== NULL
)
29860 toc_hash_table
= hash_table
<toc_hasher
>::create_ggc (1021);
29862 h
= ggc_alloc
<toc_hash_struct
> ();
29864 h
->key_mode
= mode
;
29865 h
->labelno
= labelno
;
29867 toc_hash_struct
**found
= toc_hash_table
->find_slot (h
, INSERT
);
29868 if (*found
== NULL
)
29870 else /* This is indeed a duplicate.
29871 Set this label equal to that label. */
29873 fputs ("\t.set ", file
);
29874 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LC");
29875 fprintf (file
, "%d,", labelno
);
29876 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LC");
29877 fprintf (file
, "%d\n", ((*found
)->labelno
));
29880 if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
29881 && (SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_GLOBAL_DYNAMIC
29882 || SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
))
29884 fputs ("\t.set ", file
);
29885 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LCM");
29886 fprintf (file
, "%d,", labelno
);
29887 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LCM");
29888 fprintf (file
, "%d\n", ((*found
)->labelno
));
29895 /* If we're going to put a double constant in the TOC, make sure it's
29896 aligned properly when strict alignment is on. */
29897 if ((CONST_DOUBLE_P (x
) || CONST_WIDE_INT_P (x
))
29898 && STRICT_ALIGNMENT
29899 && GET_MODE_BITSIZE (mode
) >= 64
29900 && ! (TARGET_NO_FP_IN_TOC
&& ! TARGET_MINIMAL_TOC
)) {
29901 ASM_OUTPUT_ALIGN (file
, 3);
29904 (*targetm
.asm_out
.internal_label
) (file
, "LC", labelno
);
29906 /* Handle FP constants specially. Note that if we have a minimal
29907 TOC, things we put here aren't actually in the TOC, so we can allow
29909 if (GET_CODE (x
) == CONST_DOUBLE
&&
29910 (GET_MODE (x
) == TFmode
|| GET_MODE (x
) == TDmode
29911 || GET_MODE (x
) == IFmode
|| GET_MODE (x
) == KFmode
))
29915 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29916 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29918 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29922 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29923 fputs (DOUBLE_INT_ASM_OP
, file
);
29925 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29926 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29927 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29928 fprintf (file
, "0x%lx%08lx,0x%lx%08lx\n",
29929 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29930 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff,
29931 k
[WORDS_BIG_ENDIAN
? 2 : 3] & 0xffffffff,
29932 k
[WORDS_BIG_ENDIAN
? 3 : 2] & 0xffffffff);
29937 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29938 fputs ("\t.long ", file
);
29940 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29941 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29942 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29943 fprintf (file
, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29944 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29945 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29949 else if (GET_CODE (x
) == CONST_DOUBLE
&&
29950 (GET_MODE (x
) == DFmode
|| GET_MODE (x
) == DDmode
))
29954 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29955 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29957 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29961 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29962 fputs (DOUBLE_INT_ASM_OP
, file
);
29964 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29965 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29966 fprintf (file
, "0x%lx%08lx\n",
29967 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29968 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff);
29973 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29974 fputs ("\t.long ", file
);
29976 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29977 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29978 fprintf (file
, "0x%lx,0x%lx\n",
29979 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29983 else if (GET_CODE (x
) == CONST_DOUBLE
&&
29984 (GET_MODE (x
) == SFmode
|| GET_MODE (x
) == SDmode
))
29988 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29989 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29991 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29995 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29996 fputs (DOUBLE_INT_ASM_OP
, file
);
29998 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29999 if (WORDS_BIG_ENDIAN
)
30000 fprintf (file
, "0x%lx00000000\n", l
& 0xffffffff);
30002 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
30007 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
30008 fputs ("\t.long ", file
);
30010 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
30011 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
30015 else if (GET_MODE (x
) == VOIDmode
&& GET_CODE (x
) == CONST_INT
)
30017 unsigned HOST_WIDE_INT low
;
30018 HOST_WIDE_INT high
;
30020 low
= INTVAL (x
) & 0xffffffff;
30021 high
= (HOST_WIDE_INT
) INTVAL (x
) >> 32;
      /* TOC entries are always Pmode-sized, so when big-endian
	 smaller integer constants in the TOC need to be padded.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */
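      /* Worked example (values illustrative): on a 64-bit big-endian
	 target, an SImode constant 0x12345678 occupies a full
	 doubleword TOC slot.  The shift below moves it into the high
	 half of the slot:

	   low  = 0x12345678
	   low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode)    // << 32
	   high = (HOST_WIDE_INT) low >> 32                   // 0x12345678
	   low &= 0xffffffff                                  // 0

	 so the entry emitted is roughly
	   .tc ID_12345678_0[TC],0x1234567800000000
	 and an lwz from the start of the slot still reads the original
	 32-bit value.  */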
30032 /* It would be easy to make this work, but it doesn't now. */
30033 gcc_assert (!TARGET_64BIT
|| POINTER_SIZE
>= GET_MODE_BITSIZE (mode
));
30035 if (WORDS_BIG_ENDIAN
&& POINTER_SIZE
> GET_MODE_BITSIZE (mode
))
30038 low
<<= POINTER_SIZE
- GET_MODE_BITSIZE (mode
);
30039 high
= (HOST_WIDE_INT
) low
>> 32;
30045 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
30046 fputs (DOUBLE_INT_ASM_OP
, file
);
30048 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
30049 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
30050 fprintf (file
, "0x%lx%08lx\n",
30051 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
30056 if (POINTER_SIZE
< GET_MODE_BITSIZE (mode
))
30058 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
30059 fputs ("\t.long ", file
);
30061 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
30062 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
30063 fprintf (file
, "0x%lx,0x%lx\n",
30064 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
30068 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
30069 fputs ("\t.long ", file
);
30071 fprintf (file
, "\t.tc IS_%lx[TC],", (long) low
& 0xffffffff);
30072 fprintf (file
, "0x%lx\n", (long) low
& 0xffffffff);
30078 if (GET_CODE (x
) == CONST
)
30080 gcc_assert (GET_CODE (XEXP (x
, 0)) == PLUS
30081 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
);
30083 base
= XEXP (XEXP (x
, 0), 0);
30084 offset
= INTVAL (XEXP (XEXP (x
, 0), 1));
30087 switch (GET_CODE (base
))
30090 name
= XSTR (base
, 0);
30094 ASM_GENERATE_INTERNAL_LABEL (buf
, "L",
30095 CODE_LABEL_NUMBER (XEXP (base
, 0)));
30099 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (base
));
30103 gcc_unreachable ();
30106 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
30107 fputs (TARGET_32BIT
? "\t.long " : DOUBLE_INT_ASM_OP
, file
);
30110 fputs ("\t.tc ", file
);
30111 RS6000_OUTPUT_BASENAME (file
, name
);
30114 fprintf (file
, ".N" HOST_WIDE_INT_PRINT_UNSIGNED
, - offset
);
30116 fprintf (file
, ".P" HOST_WIDE_INT_PRINT_UNSIGNED
, offset
);
30118 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30119 after other TOC symbols, reducing overflow of small TOC access
30120 to [TC] symbols. */
30121 fputs (TARGET_XCOFF
&& TARGET_CMODEL
!= CMODEL_SMALL
30122 ? "[TE]," : "[TC],", file
);
30125 /* Currently C++ toc references to vtables can be emitted before it
30126 is decided whether the vtable is public or private. If this is
30127 the case, then the linker will eventually complain that there is
30128 a TOC reference to an unknown section. Thus, for vtables only,
30129 we emit the TOC reference to reference the symbol and not the
30131 if (VTABLE_NAME_P (name
))
30133 RS6000_OUTPUT_BASENAME (file
, name
);
30135 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
30136 else if (offset
> 0)
30137 fprintf (file
, "+" HOST_WIDE_INT_PRINT_DEC
, offset
);
30140 output_addr_const (file
, x
);
30143 if (TARGET_XCOFF
&& GET_CODE (base
) == SYMBOL_REF
)
30145 switch (SYMBOL_REF_TLS_MODEL (base
))
30149 case TLS_MODEL_LOCAL_EXEC
:
30150 fputs ("@le", file
);
30152 case TLS_MODEL_INITIAL_EXEC
:
30153 fputs ("@ie", file
);
30155 /* Use global-dynamic for local-dynamic. */
30156 case TLS_MODEL_GLOBAL_DYNAMIC
:
30157 case TLS_MODEL_LOCAL_DYNAMIC
:
30159 (*targetm
.asm_out
.internal_label
) (file
, "LCM", labelno
);
30160 fputs ("\t.tc .", file
);
30161 RS6000_OUTPUT_BASENAME (file
, name
);
30162 fputs ("[TC],", file
);
30163 output_addr_const (file
, x
);
30164 fputs ("@m", file
);
30167 gcc_unreachable ();
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */
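/* For instance (illustrative), the three-byte string "hi\n" comes out
   as

	.byte "hi"
	.byte 10

   Printable characters are collected inside one quoted ".byte" string,
   a double quote in the input is written twice, characters outside the
   printable ASCII range are emitted as separate decimal byte values
   separated by ", ", and the quoted run is restarted once it grows
   past 512 characters so the assembler's length limit is never hit.  */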
30184 output_ascii (FILE *file
, const char *p
, int n
)
30187 int i
, count_string
;
30188 const char *for_string
= "\t.byte \"";
30189 const char *for_decimal
= "\t.byte ";
30190 const char *to_close
= NULL
;
30193 for (i
= 0; i
< n
; i
++)
30196 if (c
>= ' ' && c
< 0177)
30199 fputs (for_string
, file
);
30202 /* Write two quotes to get one. */
30210 for_decimal
= "\"\n\t.byte ";
30214 if (count_string
>= 512)
30216 fputs (to_close
, file
);
30218 for_string
= "\t.byte \"";
30219 for_decimal
= "\t.byte ";
30227 fputs (for_decimal
, file
);
30228 fprintf (file
, "%d", c
);
30230 for_string
= "\n\t.byte \"";
30231 for_decimal
= ", ";
30237 /* Now close the string if we have written one. Then end the line. */
30239 fputs (to_close
, file
);
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   FILENAME.  */
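/* Example (illustrative; the descriptor ".bss_" is assumed here, the
   callers elsewhere in this port pass similar strings that carry their
   own leading period): with FILENAME "lib/foo.c" and SECTION_DESC
   ".bss_" the generated name is "_foo.bss_"; with FILENAME "foo" (no
   period) it would be "_foo.bss_" as well, since the descriptor is
   simply appended.  Non-alphanumeric characters other than the last
   period are dropped while copying.  */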
30255 rs6000_gen_section_name (char **buf
, const char *filename
,
30256 const char *section_desc
)
30258 const char *q
, *after_last_slash
, *last_period
= 0;
30262 after_last_slash
= filename
;
30263 for (q
= filename
; *q
; q
++)
30266 after_last_slash
= q
+ 1;
30267 else if (*q
== '.')
30271 len
= strlen (after_last_slash
) + strlen (section_desc
) + 2;
30272 *buf
= (char *) xmalloc (len
);
30277 for (q
= after_last_slash
; *q
; q
++)
30279 if (q
== last_period
)
30281 strcpy (p
, section_desc
);
30282 p
+= strlen (section_desc
);
30286 else if (ISALNUM (*q
))
30290 if (last_period
== 0)
30291 strcpy (p
, section_desc
);
30296 /* Emit profile function. */
30299 output_profile_hook (int labelno ATTRIBUTE_UNUSED
)
30301 /* Non-standard profiling for kernels, which just saves LR then calls
30302 _mcount without worrying about arg saves. The idea is to change
30303 the function prologue as little as possible as it isn't easy to
30304 account for arg save/restore code added just for _mcount. */
30305 if (TARGET_PROFILE_KERNEL
)
30308 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
30310 #ifndef NO_PROFILE_COUNTERS
30311 # define NO_PROFILE_COUNTERS 0
30313 if (NO_PROFILE_COUNTERS
)
30314 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
30315 LCT_NORMAL
, VOIDmode
);
30319 const char *label_name
;
30322 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
30323 label_name
= ggc_strdup ((*targetm
.strip_name_encoding
) (buf
));
30324 fun
= gen_rtx_SYMBOL_REF (Pmode
, label_name
);
30326 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
30327 LCT_NORMAL
, VOIDmode
, fun
, Pmode
);
30330 else if (DEFAULT_ABI
== ABI_DARWIN
)
30332 const char *mcount_name
= RS6000_MCOUNT
;
30333 int caller_addr_regno
= LR_REGNO
;
30335 /* Be conservative and always set this, at least for now. */
30336 crtl
->uses_pic_offset_table
= 1;
30339 /* For PIC code, set up a stub and collect the caller's address
30340 from r0, which is where the prologue puts it. */
30341 if (MACHOPIC_INDIRECT
30342 && crtl
->uses_pic_offset_table
)
30343 caller_addr_regno
= 0;
30345 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, mcount_name
),
30346 LCT_NORMAL
, VOIDmode
,
30347 gen_rtx_REG (Pmode
, caller_addr_regno
), Pmode
);
30351 /* Write function profiler code. */
30354 output_function_profiler (FILE *file
, int labelno
)
30358 switch (DEFAULT_ABI
)
30361 gcc_unreachable ();
30366 warning (0, "no profiling of 64-bit code for this ABI");
30369 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
30370 fprintf (file
, "\tmflr %s\n", reg_names
[0]);
30371 if (NO_PROFILE_COUNTERS
)
30373 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30374 reg_names
[0], reg_names
[1]);
30376 else if (TARGET_SECURE_PLT
&& flag_pic
)
30378 if (TARGET_LINK_STACK
)
30381 get_ppc476_thunk_name (name
);
30382 asm_fprintf (file
, "\tbl %s\n", name
);
30385 asm_fprintf (file
, "\tbcl 20,31,1f\n1:\n");
30386 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30387 reg_names
[0], reg_names
[1]);
30388 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30389 asm_fprintf (file
, "\taddis %s,%s,",
30390 reg_names
[12], reg_names
[12]);
30391 assemble_name (file
, buf
);
30392 asm_fprintf (file
, "-1b@ha\n\tla %s,", reg_names
[0]);
30393 assemble_name (file
, buf
);
30394 asm_fprintf (file
, "-1b@l(%s)\n", reg_names
[12]);
30396 else if (flag_pic
== 1)
30398 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file
);
30399 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30400 reg_names
[0], reg_names
[1]);
30401 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30402 asm_fprintf (file
, "\tlwz %s,", reg_names
[0]);
30403 assemble_name (file
, buf
);
30404 asm_fprintf (file
, "@got(%s)\n", reg_names
[12]);
30406 else if (flag_pic
> 1)
30408 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30409 reg_names
[0], reg_names
[1]);
30410 /* Now, we need to get the address of the label. */
30411 if (TARGET_LINK_STACK
)
30414 get_ppc476_thunk_name (name
);
30415 asm_fprintf (file
, "\tbl %s\n\tb 1f\n\t.long ", name
);
30416 assemble_name (file
, buf
);
30417 fputs ("-.\n1:", file
);
30418 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30419 asm_fprintf (file
, "\taddi %s,%s,4\n",
30420 reg_names
[11], reg_names
[11]);
30424 fputs ("\tbcl 20,31,1f\n\t.long ", file
);
30425 assemble_name (file
, buf
);
30426 fputs ("-.\n1:", file
);
30427 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30429 asm_fprintf (file
, "\tlwz %s,0(%s)\n",
30430 reg_names
[0], reg_names
[11]);
30431 asm_fprintf (file
, "\tadd %s,%s,%s\n",
30432 reg_names
[0], reg_names
[0], reg_names
[11]);
30436 asm_fprintf (file
, "\tlis %s,", reg_names
[12]);
30437 assemble_name (file
, buf
);
30438 fputs ("@ha\n", file
);
30439 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30440 reg_names
[0], reg_names
[1]);
30441 asm_fprintf (file
, "\tla %s,", reg_names
[0]);
30442 assemble_name (file
, buf
);
30443 asm_fprintf (file
, "@l(%s)\n", reg_names
[12]);
30446 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30447 fprintf (file
, "\tbl %s%s\n",
30448 RS6000_MCOUNT
, flag_pic
? "@plt" : "");
30454 /* Don't do anything, done in output_profile_hook (). */
30461 /* The following variable value is the last issued insn. */
30463 static rtx_insn
*last_scheduled_insn
;
30465 /* The following variable helps to balance issuing of load and
30466 store instructions */
30468 static int load_store_pendulum
;
30470 /* The following variable helps pair divide insns during scheduling. */
30471 static int divide_cnt
;
30472 /* The following variable helps pair and alternate vector and vector load
30473 insns during scheduling. */
30474 static int vec_pairing
;
30477 /* Power4 load update and store update instructions are cracked into a
30478 load or store and an integer insn which are executed in the same cycle.
30479 Branches have their own dispatch slot which does not count against the
30480 GCC issue rate, but it changes the program flow so there are no other
30481 instructions to issue in this cycle. */
30484 rs6000_variable_issue_1 (rtx_insn
*insn
, int more
)
30486 last_scheduled_insn
= insn
;
30487 if (GET_CODE (PATTERN (insn
)) == USE
30488 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30490 cached_can_issue_more
= more
;
30491 return cached_can_issue_more
;
30494 if (insn_terminates_group_p (insn
, current_group
))
30496 cached_can_issue_more
= 0;
30497 return cached_can_issue_more
;
30500 /* If no reservation, but reach here */
30501 if (recog_memoized (insn
) < 0)
30504 if (rs6000_sched_groups
)
30506 if (is_microcoded_insn (insn
))
30507 cached_can_issue_more
= 0;
30508 else if (is_cracked_insn (insn
))
30509 cached_can_issue_more
= more
> 2 ? more
- 2 : 0;
30511 cached_can_issue_more
= more
- 1;
30513 return cached_can_issue_more
;
30516 if (rs6000_cpu_attr
== CPU_CELL
&& is_nonpipeline_insn (insn
))
30519 cached_can_issue_more
= more
- 1;
30520 return cached_can_issue_more
;
30524 rs6000_variable_issue (FILE *stream
, int verbose
, rtx_insn
*insn
, int more
)
30526 int r
= rs6000_variable_issue_1 (insn
, more
);
30528 fprintf (stream
, "// rs6000_variable_issue (more = %d) = %d\n", more
, r
);
30532 /* Adjust the cost of a scheduling dependency. Return the new cost of
30533 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30536 rs6000_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
, int cost
,
30539 enum attr_type attr_type
;
30541 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
30548 /* Data dependency; DEP_INSN writes a register that INSN reads
30549 some cycles later. */
30551 /* Separate a load from a narrower, dependent store. */
30552 if ((rs6000_sched_groups
|| rs6000_cpu_attr
== CPU_POWER9
)
30553 && GET_CODE (PATTERN (insn
)) == SET
30554 && GET_CODE (PATTERN (dep_insn
)) == SET
30555 && GET_CODE (XEXP (PATTERN (insn
), 1)) == MEM
30556 && GET_CODE (XEXP (PATTERN (dep_insn
), 0)) == MEM
30557 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn
), 1)))
30558 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn
), 0)))))
30561 attr_type
= get_attr_type (insn
);
30566 /* Tell the first scheduling pass about the latency between
30567 a mtctr and bctr (and mtlr and br/blr). The first
30568 scheduling pass will not know about this latency since
30569 the mtctr instruction, which has the latency associated
30570 to it, will be generated by reload. */
30573 /* Leave some extra cycles between a compare and its
30574 dependent branch, to inhibit expensive mispredicts. */
30575 if ((rs6000_cpu_attr
== CPU_PPC603
30576 || rs6000_cpu_attr
== CPU_PPC604
30577 || rs6000_cpu_attr
== CPU_PPC604E
30578 || rs6000_cpu_attr
== CPU_PPC620
30579 || rs6000_cpu_attr
== CPU_PPC630
30580 || rs6000_cpu_attr
== CPU_PPC750
30581 || rs6000_cpu_attr
== CPU_PPC7400
30582 || rs6000_cpu_attr
== CPU_PPC7450
30583 || rs6000_cpu_attr
== CPU_PPCE5500
30584 || rs6000_cpu_attr
== CPU_PPCE6500
30585 || rs6000_cpu_attr
== CPU_POWER4
30586 || rs6000_cpu_attr
== CPU_POWER5
30587 || rs6000_cpu_attr
== CPU_POWER7
30588 || rs6000_cpu_attr
== CPU_POWER8
30589 || rs6000_cpu_attr
== CPU_POWER9
30590 || rs6000_cpu_attr
== CPU_CELL
)
30591 && recog_memoized (dep_insn
)
30592 && (INSN_CODE (dep_insn
) >= 0))
30594 switch (get_attr_type (dep_insn
))
30597 case TYPE_FPCOMPARE
:
30598 case TYPE_CR_LOGICAL
:
30599 case TYPE_DELAYED_CR
:
30603 if (get_attr_dot (dep_insn
) == DOT_YES
)
30608 if (get_attr_dot (dep_insn
) == DOT_YES
30609 && get_attr_var_shift (dep_insn
) == VAR_SHIFT_NO
)
30620 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30621 && recog_memoized (dep_insn
)
30622 && (INSN_CODE (dep_insn
) >= 0))
30625 if (GET_CODE (PATTERN (insn
)) != SET
)
30626 /* If this happens, we have to extend this to schedule
30627 optimally. Return default for now. */
30630 /* Adjust the cost for the case where the value written
30631 by a fixed point operation is used as the address
30632 gen value on a store. */
30633 switch (get_attr_type (dep_insn
))
30638 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30639 return get_attr_sign_extend (dep_insn
)
30640 == SIGN_EXTEND_YES
? 6 : 4;
30645 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30646 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30656 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30664 if (get_attr_update (dep_insn
) == UPDATE_YES
30665 && ! rs6000_store_data_bypass_p (dep_insn
, insn
))
30671 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30677 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30678 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30688 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30689 && recog_memoized (dep_insn
)
30690 && (INSN_CODE (dep_insn
) >= 0))
30693 /* Adjust the cost for the case where the value written
30694 by a fixed point instruction is used within the address
30695 gen portion of a subsequent load(u)(x) */
30696 switch (get_attr_type (dep_insn
))
30701 if (set_to_load_agen (dep_insn
, insn
))
30702 return get_attr_sign_extend (dep_insn
)
30703 == SIGN_EXTEND_YES
? 6 : 4;
30708 if (set_to_load_agen (dep_insn
, insn
))
30709 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30719 if (set_to_load_agen (dep_insn
, insn
))
30727 if (get_attr_update (dep_insn
) == UPDATE_YES
30728 && set_to_load_agen (dep_insn
, insn
))
30734 if (set_to_load_agen (dep_insn
, insn
))
30740 if (set_to_load_agen (dep_insn
, insn
))
30741 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30751 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30752 && get_attr_update (insn
) == UPDATE_NO
30753 && recog_memoized (dep_insn
)
30754 && (INSN_CODE (dep_insn
) >= 0)
30755 && (get_attr_type (dep_insn
) == TYPE_MFFGPR
))
30762 /* Fall out to return default cost. */
30766 case REG_DEP_OUTPUT
:
30767 /* Output dependency; DEP_INSN writes a register that INSN writes some
30769 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30770 && recog_memoized (dep_insn
)
30771 && (INSN_CODE (dep_insn
) >= 0))
30773 attr_type
= get_attr_type (insn
);
30778 case TYPE_FPSIMPLE
:
30779 if (get_attr_type (dep_insn
) == TYPE_FP
30780 || get_attr_type (dep_insn
) == TYPE_FPSIMPLE
)
30784 if (get_attr_update (insn
) == UPDATE_NO
30785 && get_attr_type (dep_insn
) == TYPE_MFFGPR
)
30792 /* Fall through, no cost for output dependency. */
30796 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30801 gcc_unreachable ();
30807 /* Debug version of rs6000_adjust_cost. */
30810 rs6000_debug_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
,
30811 int cost
, unsigned int dw
)
30813 int ret
= rs6000_adjust_cost (insn
, dep_type
, dep_insn
, cost
, dw
);
    default:		 dep = "unknown dependency"; break;
    case REG_DEP_TRUE:	 dep = "data dependency"; break;
    case REG_DEP_OUTPUT: dep = "output dependency"; break;
    case REG_DEP_ANTI:	 dep = "anti dependency"; break;
30828 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30829 "%s, insn:\n", ret
, cost
, dep
);
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */
30841 is_microcoded_insn (rtx_insn
*insn
)
30843 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30844 || GET_CODE (PATTERN (insn
)) == USE
30845 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30848 if (rs6000_cpu_attr
== CPU_CELL
)
30849 return get_attr_cell_micro (insn
) == CELL_MICRO_ALWAYS
;
30851 if (rs6000_sched_groups
30852 && (rs6000_cpu
== PROCESSOR_POWER4
|| rs6000_cpu
== PROCESSOR_POWER5
))
30854 enum attr_type type
= get_attr_type (insn
);
30855 if ((type
== TYPE_LOAD
30856 && get_attr_update (insn
) == UPDATE_YES
30857 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
)
30858 || ((type
== TYPE_LOAD
|| type
== TYPE_STORE
)
30859 && get_attr_update (insn
) == UPDATE_YES
30860 && get_attr_indexed (insn
) == INDEXED_YES
)
30861 || type
== TYPE_MFCR
)
30868 /* The function returns true if INSN is cracked into 2 instructions
30869 by the processor (and therefore occupies 2 issue slots). */
30872 is_cracked_insn (rtx_insn
*insn
)
30874 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30875 || GET_CODE (PATTERN (insn
)) == USE
30876 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30879 if (rs6000_sched_groups
30880 && (rs6000_cpu
== PROCESSOR_POWER4
|| rs6000_cpu
== PROCESSOR_POWER5
))
30882 enum attr_type type
= get_attr_type (insn
);
30883 if ((type
== TYPE_LOAD
30884 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
30885 && get_attr_update (insn
) == UPDATE_NO
)
30886 || (type
== TYPE_LOAD
30887 && get_attr_sign_extend (insn
) == SIGN_EXTEND_NO
30888 && get_attr_update (insn
) == UPDATE_YES
30889 && get_attr_indexed (insn
) == INDEXED_NO
)
30890 || (type
== TYPE_STORE
30891 && get_attr_update (insn
) == UPDATE_YES
30892 && get_attr_indexed (insn
) == INDEXED_NO
)
30893 || ((type
== TYPE_FPLOAD
|| type
== TYPE_FPSTORE
)
30894 && get_attr_update (insn
) == UPDATE_YES
)
30895 || type
== TYPE_DELAYED_CR
30896 || (type
== TYPE_EXTS
30897 && get_attr_dot (insn
) == DOT_YES
)
30898 || (type
== TYPE_SHIFT
30899 && get_attr_dot (insn
) == DOT_YES
30900 && get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
30901 || (type
== TYPE_MUL
30902 && get_attr_dot (insn
) == DOT_YES
)
30903 || type
== TYPE_DIV
30904 || (type
== TYPE_INSERT
30905 && get_attr_size (insn
) == SIZE_32
))
30912 /* The function returns true if INSN can be issued only from
30913 the branch slot. */
30916 is_branch_slot_insn (rtx_insn
*insn
)
30918 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30919 || GET_CODE (PATTERN (insn
)) == USE
30920 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30923 if (rs6000_sched_groups
)
30925 enum attr_type type
= get_attr_type (insn
);
30926 if (type
== TYPE_BRANCH
|| type
== TYPE_JMPREG
)
30934 /* The function returns true if out_inst sets a value that is
30935 used in the address generation computation of in_insn */
30937 set_to_load_agen (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
30939 rtx out_set
, in_set
;
30941 /* For performance reasons, only handle the simple case where
30942 both loads are a single_set. */
30943 out_set
= single_set (out_insn
);
30946 in_set
= single_set (in_insn
);
30948 return reg_mentioned_p (SET_DEST (out_set
), SET_SRC (in_set
));
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;

  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);
  else
    return false;

  addr_rtx = (XEXP (mem, 0));
  if (GET_CODE (addr_rtx) == PRE_MODIFY)
    addr_rtx = XEXP (addr_rtx, 1);

  *offset = 0;
  while (GET_CODE (addr_rtx) == PLUS
	 && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *offset += INTVAL (XEXP (addr_rtx, 1));
      addr_rtx = XEXP (addr_rtx, 0);
    }
  if (!REG_P (addr_rtx))
    return false;

  *base = addr_rtx;
  return true;
}

/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}

/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
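/* Worked example (offsets and sizes illustrative): given two stores

     st1:  mem:SI (plus:DI (reg:DI 9) (const_int 16))    size 4
     st2:  mem:SI (plus:DI (reg:DI 9) (const_int 20))    size 4

   get_memref_parts () yields base r9/offset 16/size 4 and base
   r9/offset 20/size 4, so adjacent_mem_locations () is true
   (16 + 4 == 20) while mem_locations_overlap () is false.  If st2
   instead used offset 18, the two would overlap (16 <= 18 < 16 + 4)
   and only mem_locations_overlap () would hold.  Different base
   registers always make both predicates false.  */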
31026 /* A C statement (sans semicolon) to update the integer scheduling
31027 priority INSN_PRIORITY (INSN). Increase the priority to execute the
31028 INSN earlier, reduce the priority to execute INSN later. Do not
31029 define this macro if you do not need to adjust the scheduling
31030 priorities of insns. */
31033 rs6000_adjust_priority (rtx_insn
*insn ATTRIBUTE_UNUSED
, int priority
)
31035 rtx load_mem
, str_mem
;
31036 /* On machines (like the 750) which have asymmetric integer units,
31037 where one integer unit can do multiply and divides and the other
31038 can't, reduce the priority of multiply/divide so it is scheduled
31039 before other integer operations. */
31042 if (! INSN_P (insn
))
31045 if (GET_CODE (PATTERN (insn
)) == USE
)
31048 switch (rs6000_cpu_attr
) {
31050 switch (get_attr_type (insn
))
31057 fprintf (stderr
, "priority was %#x (%d) before adjustment\n",
31058 priority
, priority
);
31059 if (priority
>= 0 && priority
< 0x01000000)
31066 if (insn_must_be_first_in_group (insn
)
31067 && reload_completed
31068 && current_sched_info
->sched_max_insns_priority
31069 && rs6000_sched_restricted_insns_priority
)
31072 /* Prioritize insns that can be dispatched only in the first
31074 if (rs6000_sched_restricted_insns_priority
== 1)
31075 /* Attach highest priority to insn. This means that in
31076 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31077 precede 'priority' (critical path) considerations. */
31078 return current_sched_info
->sched_max_insns_priority
;
31079 else if (rs6000_sched_restricted_insns_priority
== 2)
31080 /* Increase priority of insn by a minimal amount. This means that in
31081 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31082 considerations precede dispatch-slot restriction considerations. */
31083 return (priority
+ 1);
31086 if (rs6000_cpu
== PROCESSOR_POWER6
31087 && ((load_store_pendulum
== -2 && is_load_insn (insn
, &load_mem
))
31088 || (load_store_pendulum
== 2 && is_store_insn (insn
, &str_mem
))))
31089 /* Attach highest priority to insn if the scheduler has just issued two
31090 stores and this instruction is a load, or two loads and this instruction
31091 is a store. Power6 wants loads and stores scheduled alternately
31093 return current_sched_info
->sched_max_insns_priority
;
31098 /* Return true if the instruction is nonpipelined on the Cell. */
31100 is_nonpipeline_insn (rtx_insn
*insn
)
31102 enum attr_type type
;
31103 if (!insn
|| !NONDEBUG_INSN_P (insn
)
31104 || GET_CODE (PATTERN (insn
)) == USE
31105 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31108 type
= get_attr_type (insn
);
31109 if (type
== TYPE_MUL
31110 || type
== TYPE_DIV
31111 || type
== TYPE_SDIV
31112 || type
== TYPE_DDIV
31113 || type
== TYPE_SSQRT
31114 || type
== TYPE_DSQRT
31115 || type
== TYPE_MFCR
31116 || type
== TYPE_MFCRF
31117 || type
== TYPE_MFJMPR
)
31125 /* Return how many instructions the machine can issue per cycle. */
31128 rs6000_issue_rate (void)
31130 /* Unless scheduling for register pressure, use issue rate of 1 for
31131 first scheduling pass to decrease degradation. */
31132 if (!reload_completed
&& !flag_sched_pressure
)
31135 switch (rs6000_cpu_attr
) {
31137 case CPU_PPC601
: /* ? */
31147 case CPU_PPCE300C2
:
31148 case CPU_PPCE300C3
:
31149 case CPU_PPCE500MC
:
31150 case CPU_PPCE500MC64
:
31175 /* Return how many instructions to look ahead for better insn
31179 rs6000_use_sched_lookahead (void)
31181 switch (rs6000_cpu_attr
)
31188 return (reload_completed
? 8 : 0);
31195 /* We are choosing insn from the ready queue. Return zero if INSN can be
31198 rs6000_use_sched_lookahead_guard (rtx_insn
*insn
, int ready_index
)
31200 if (ready_index
== 0)
31203 if (rs6000_cpu_attr
!= CPU_CELL
)
31206 gcc_assert (insn
!= NULL_RTX
&& INSN_P (insn
));
31208 if (!reload_completed
31209 || is_nonpipeline_insn (insn
)
31210 || is_microcoded_insn (insn
))
31216 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31217 and return true. */
31220 find_mem_ref (rtx pat
, rtx
*mem_ref
)
31225 /* stack_tie does not produce any real memory traffic. */
31226 if (tie_operand (pat
, VOIDmode
))
31229 if (GET_CODE (pat
) == MEM
)
31235 /* Recursively process the pattern. */
31236 fmt
= GET_RTX_FORMAT (GET_CODE (pat
));
31238 for (i
= GET_RTX_LENGTH (GET_CODE (pat
)) - 1; i
>= 0; i
--)
31242 if (find_mem_ref (XEXP (pat
, i
), mem_ref
))
31245 else if (fmt
[i
] == 'E')
31246 for (j
= XVECLEN (pat
, i
) - 1; j
>= 0; j
--)
31248 if (find_mem_ref (XVECEXP (pat
, i
, j
), mem_ref
))
31256 /* Determine if PAT is a PATTERN of a load insn. */
31259 is_load_insn1 (rtx pat
, rtx
*load_mem
)
31261 if (!pat
|| pat
== NULL_RTX
)
31264 if (GET_CODE (pat
) == SET
)
31265 return find_mem_ref (SET_SRC (pat
), load_mem
);
31267 if (GET_CODE (pat
) == PARALLEL
)
31271 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
31272 if (is_load_insn1 (XVECEXP (pat
, 0, i
), load_mem
))
31279 /* Determine if INSN loads from memory. */
31282 is_load_insn (rtx insn
, rtx
*load_mem
)
31284 if (!insn
|| !INSN_P (insn
))
31290 return is_load_insn1 (PATTERN (insn
), load_mem
);
31293 /* Determine if PAT is a PATTERN of a store insn. */
31296 is_store_insn1 (rtx pat
, rtx
*str_mem
)
31298 if (!pat
|| pat
== NULL_RTX
)
31301 if (GET_CODE (pat
) == SET
)
31302 return find_mem_ref (SET_DEST (pat
), str_mem
);
31304 if (GET_CODE (pat
) == PARALLEL
)
31308 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
31309 if (is_store_insn1 (XVECEXP (pat
, 0, i
), str_mem
))
31316 /* Determine if INSN stores to memory. */
31319 is_store_insn (rtx insn
, rtx
*str_mem
)
31321 if (!insn
|| !INSN_P (insn
))
31324 return is_store_insn1 (PATTERN (insn
), str_mem
);
31327 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31330 is_power9_pairable_vec_type (enum attr_type type
)
31334 case TYPE_VECSIMPLE
:
31335 case TYPE_VECCOMPLEX
:
31339 case TYPE_VECFLOAT
:
31341 case TYPE_VECDOUBLE
:
31349 /* Returns whether the dependence between INSN and NEXT is considered
31350 costly by the given target. */
31353 rs6000_is_costly_dependence (dep_t dep
, int cost
, int distance
)
31357 rtx load_mem
, str_mem
;
31359 /* If the flag is not enabled - no dependence is considered costly;
31360 allow all dependent insns in the same group.
31361 This is the most aggressive option. */
31362 if (rs6000_sched_costly_dep
== no_dep_costly
)
31365 /* If the flag is set to 1 - a dependence is always considered costly;
31366 do not allow dependent instructions in the same group.
31367 This is the most conservative option. */
31368 if (rs6000_sched_costly_dep
== all_deps_costly
)
31371 insn
= DEP_PRO (dep
);
31372 next
= DEP_CON (dep
);
31374 if (rs6000_sched_costly_dep
== store_to_load_dep_costly
31375 && is_load_insn (next
, &load_mem
)
31376 && is_store_insn (insn
, &str_mem
))
31377 /* Prevent load after store in the same group. */
31380 if (rs6000_sched_costly_dep
== true_store_to_load_dep_costly
31381 && is_load_insn (next
, &load_mem
)
31382 && is_store_insn (insn
, &str_mem
)
31383 && DEP_TYPE (dep
) == REG_DEP_TRUE
31384 && mem_locations_overlap(str_mem
, load_mem
))
31385 /* Prevent load after store in the same group if it is a true
31389 /* The flag is set to X; dependences with latency >= X are considered costly,
31390 and will not be scheduled in the same group. */
31391 if (rs6000_sched_costly_dep
<= max_dep_latency
31392 && ((cost
- distance
) >= (int)rs6000_sched_costly_dep
))
31398 /* Return the next insn after INSN that is found before TAIL is reached,
31399 skipping any "non-active" insns - insns that will not actually occupy
31400 an issue slot. Return NULL_RTX if such an insn is not found. */
31403 get_next_active_insn (rtx_insn
*insn
, rtx_insn
*tail
)
31405 if (insn
== NULL_RTX
|| insn
== tail
)
31410 insn
= NEXT_INSN (insn
);
31411 if (insn
== NULL_RTX
|| insn
== tail
)
31415 || JUMP_P (insn
) || JUMP_TABLE_DATA_P (insn
)
31416 || (NONJUMP_INSN_P (insn
)
31417 && GET_CODE (PATTERN (insn
)) != USE
31418 && GET_CODE (PATTERN (insn
)) != CLOBBER
31419 && INSN_CODE (insn
) != CODE_FOR_stack_tie
))
31425 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31428 power9_sched_reorder2 (rtx_insn
**ready
, int lastpos
)
31433 enum attr_type type
, type2
;
31435 type
= get_attr_type (last_scheduled_insn
);
31437 /* Try to issue fixed point divides back-to-back in pairs so they will be
31438 routed to separate execution units and execute in parallel. */
31439 if (type
== TYPE_DIV
&& divide_cnt
== 0)
31441 /* First divide has been scheduled. */
31444 /* Scan the ready list looking for another divide, if found move it
31445 to the end of the list so it is chosen next. */
31449 if (recog_memoized (ready
[pos
]) >= 0
31450 && get_attr_type (ready
[pos
]) == TYPE_DIV
)
31453 for (i
= pos
; i
< lastpos
; i
++)
31454 ready
[i
] = ready
[i
+ 1];
31455 ready
[lastpos
] = tmp
;
31463 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31466 /* The best dispatch throughput for vector and vector load insns can be
31467 achieved by interleaving a vector and vector load such that they'll
31468 dispatch to the same superslice. If this pairing cannot be achieved
31469 then it is best to pair vector insns together and vector load insns
31472 To aid in this pairing, vec_pairing maintains the current state with
31473 the following values:
31475 0 : Initial state, no vecload/vector pairing has been started.
31477 1 : A vecload or vector insn has been issued and a candidate for
31478 pairing has been found and moved to the end of the ready
31480 if (type
== TYPE_VECLOAD
)
31482 /* Issued a vecload. */
31483 if (vec_pairing
== 0)
31485 int vecload_pos
= -1;
31486 /* We issued a single vecload, look for a vector insn to pair it
31487 with. If one isn't found, try to pair another vecload. */
31491 if (recog_memoized (ready
[pos
]) >= 0)
31493 type2
= get_attr_type (ready
[pos
]);
31494 if (is_power9_pairable_vec_type (type2
))
31496 /* Found a vector insn to pair with, move it to the
31497 end of the ready list so it is scheduled next. */
31499 for (i
= pos
; i
< lastpos
; i
++)
31500 ready
[i
] = ready
[i
+ 1];
31501 ready
[lastpos
] = tmp
;
31503 return cached_can_issue_more
;
31505 else if (type2
== TYPE_VECLOAD
&& vecload_pos
== -1)
31506 /* Remember position of first vecload seen. */
31511 if (vecload_pos
>= 0)
31513 /* Didn't find a vector to pair with but did find a vecload,
31514 move it to the end of the ready list. */
31515 tmp
= ready
[vecload_pos
];
31516 for (i
= vecload_pos
; i
< lastpos
; i
++)
31517 ready
[i
] = ready
[i
+ 1];
31518 ready
[lastpos
] = tmp
;
31520 return cached_can_issue_more
;
31524 else if (is_power9_pairable_vec_type (type
))
31526 /* Issued a vector operation. */
31527 if (vec_pairing
== 0)
31530 /* We issued a single vector insn, look for a vecload to pair it
31531 with. If one isn't found, try to pair another vector. */
31535 if (recog_memoized (ready
[pos
]) >= 0)
31537 type2
= get_attr_type (ready
[pos
]);
31538 if (type2
== TYPE_VECLOAD
)
31540 /* Found a vecload insn to pair with, move it to the
31541 end of the ready list so it is scheduled next. */
31543 for (i
= pos
; i
< lastpos
; i
++)
31544 ready
[i
] = ready
[i
+ 1];
31545 ready
[lastpos
] = tmp
;
31547 return cached_can_issue_more
;
31549 else if (is_power9_pairable_vec_type (type2
)
31551 /* Remember position of first vector insn seen. */
31558 /* Didn't find a vecload to pair with but did find a vector
31559 insn, move it to the end of the ready list. */
31560 tmp
= ready
[vec_pos
];
31561 for (i
= vec_pos
; i
< lastpos
; i
++)
31562 ready
[i
] = ready
[i
+ 1];
31563 ready
[lastpos
] = tmp
;
31565 return cached_can_issue_more
;
31570 /* We've either finished a vec/vecload pair, couldn't find an insn to
31571 continue the current pair, or the last insn had nothing to do with
31572 with pairing. In any case, reset the state. */
31576 return cached_can_issue_more
;
31579 /* We are about to begin issuing insns for this clock cycle. */
31582 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED
, int sched_verbose
,
31583 rtx_insn
**ready ATTRIBUTE_UNUSED
,
31584 int *pn_ready ATTRIBUTE_UNUSED
,
31585 int clock_var ATTRIBUTE_UNUSED
)
31587 int n_ready
= *pn_ready
;
31590 fprintf (dump
, "// rs6000_sched_reorder :\n");
  /* Reorder the ready list, if the second to last ready insn
     is a nonpipeline insn.  */
31594 if (rs6000_cpu_attr
== CPU_CELL
&& n_ready
> 1)
31596 if (is_nonpipeline_insn (ready
[n_ready
- 1])
31597 && (recog_memoized (ready
[n_ready
- 2]) > 0))
31598 /* Simply swap first two insns. */
31599 std::swap (ready
[n_ready
- 1], ready
[n_ready
- 2]);
31602 if (rs6000_cpu
== PROCESSOR_POWER6
)
31603 load_store_pendulum
= 0;
31605 return rs6000_issue_rate ();
31608 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31611 rs6000_sched_reorder2 (FILE *dump
, int sched_verbose
, rtx_insn
**ready
,
31612 int *pn_ready
, int clock_var ATTRIBUTE_UNUSED
)
31615 fprintf (dump
, "// rs6000_sched_reorder2 :\n");
  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
	 issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
	 cycle and we attempt to locate another load in the ready list to
	 issue with it.

       - If the pendulum is -2, then two stores have already been
	 issued in this cycle, so we increase the priority of the first load
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
	 cycle and we attempt to locate another store in the ready list to
	 issue with it, preferring a store to an adjacent memory location to
	 facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
	 issued in this cycle, so we increase the priority of the first store
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
     load/store instructions which make use of the LSU and which
     would need to be accounted for to strictly model the behavior
     of the machine.  Those instructions are currently unaccounted
     for to help minimize compile time overhead of this code.  */
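  /* A small illustrative trace of the pendulum (instruction names are
     made up):

	cycle N,   issue load  l1  -> pendulum  0 ->  1  (look for a 2nd load)
	cycle N,   issue load  l2  -> pendulum  1 ->  2  (bump first store)
	cycle N+1, issue store s1  -> pendulum reset to 0 at cycle start,
				      then 0 -> -1       (look for a 2nd
							   store, preferring
							   one adjacent to s1)

     The reset to zero at the start of each cycle happens in
     rs6000_sched_reorder above.  */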
31657 if (rs6000_cpu
== PROCESSOR_POWER6
&& last_scheduled_insn
)
31662 rtx load_mem
, str_mem
;
31664 if (is_store_insn (last_scheduled_insn
, &str_mem
))
31665 /* Issuing a store, swing the load_store_pendulum to the left */
31666 load_store_pendulum
--;
31667 else if (is_load_insn (last_scheduled_insn
, &load_mem
))
31668 /* Issuing a load, swing the load_store_pendulum to the right */
31669 load_store_pendulum
++;
31671 return cached_can_issue_more
;
31673 /* If the pendulum is balanced, or there is only one instruction on
31674 the ready list, then all is well, so return. */
31675 if ((load_store_pendulum
== 0) || (*pn_ready
<= 1))
31676 return cached_can_issue_more
;
31678 if (load_store_pendulum
== 1)
31680 /* A load has been issued in this cycle. Scan the ready list
31681 for another load to issue with it */
31686 if (is_load_insn (ready
[pos
], &load_mem
))
31688 /* Found a load. Move it to the head of the ready list,
31689 and adjust its priority so that it is more likely to stay there.  */
31692 for (i = pos; i < *pn_ready - 1; i++)
31693 ready[i] = ready[i + 1];
31694 ready[*pn_ready - 1] = tmp;
31696 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31697 INSN_PRIORITY (tmp
)++;
31703 else if (load_store_pendulum
== -2)
31705 /* Two stores have been issued in this cycle. Increase the
31706 priority of the first load in the ready list to favor it for
31707 issuing in the next cycle. */
31712 if (is_load_insn (ready
[pos
], &load_mem
)
31714 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31716 INSN_PRIORITY (ready
[pos
])++;
31718 /* Adjust the pendulum to account for the fact that a load
31719 was found and increased in priority. This is to prevent
31720 increasing the priority of multiple loads */
31721 load_store_pendulum
--;
31728 else if (load_store_pendulum
== -1)
31730 /* A store has been issued in this cycle. Scan the ready list for
31731 another store to issue with it, preferring a store to an adjacent memory location.  */
31733 int first_store_pos
= -1;
31739 if (is_store_insn (ready
[pos
], &str_mem
))
31742 /* Maintain the index of the first store found on the ready list.  */
31744 if (first_store_pos
== -1)
31745 first_store_pos
= pos
;
31747 if (is_store_insn (last_scheduled_insn
, &str_mem2
)
31748 && adjacent_mem_locations (str_mem
, str_mem2
))
31750 /* Found an adjacent store. Move it to the head of the
31751 ready list, and adjust its priority so that it is
31752 more likely to stay there.  */
31754 for (i = pos; i < *pn_ready - 1; i++)
31755 ready[i] = ready[i + 1];
31756 ready[*pn_ready - 1] = tmp;
31758 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31759 INSN_PRIORITY (tmp
)++;
31761 first_store_pos
= -1;
31769 if (first_store_pos
>= 0)
31771 /* An adjacent store wasn't found, but a non-adjacent store was,
31772 so move the non-adjacent store to the front of the ready
31773 list, and adjust its priority so that it is more likely to stay there.  */
31775 tmp = ready[first_store_pos];
31776 for (i = first_store_pos; i < *pn_ready - 1; i++)
31777 ready[i] = ready[i + 1];
31778 ready[*pn_ready - 1] = tmp;
31779 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31780 INSN_PRIORITY (tmp
)++;
31783 else if (load_store_pendulum
== 2)
31785 /* Two loads have been issued in this cycle. Increase the priority
31786 of the first store in the ready list to favor it for issuing in the next cycle.  */
31792 if (is_store_insn (ready
[pos
], &str_mem
)
31794 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31796 INSN_PRIORITY (ready
[pos
])++;
31798 /* Adjust the pendulum to account for the fact that a store
31799 was found and increased in priority. This is to prevent
31800 increasing the priority of multiple stores */
31801 load_store_pendulum
++;
31810 /* Do Power9 dependent reordering if necessary. */
31811 if (rs6000_cpu
== PROCESSOR_POWER9
&& last_scheduled_insn
31812 && recog_memoized (last_scheduled_insn
) >= 0)
31813 return power9_sched_reorder2 (ready
, *pn_ready
- 1);
31815 return cached_can_issue_more
;
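/* A minimal illustrative sketch (added; not part of the original file) of how
   the Power6 load/store pendulum value described in rs6000_sched_reorder2
   above maps onto the actions taken.  The helper names in the comments are
   hypothetical placeholders; the real work is done inline above on the ready
   list and on INSN_PRIORITY.  */
static void
pendulum_action_sketch (int pendulum)
{
  switch (pendulum)
    {
    case 0:	/* Balanced: no loads or stores issued yet, do nothing.  */
      break;
    case 1:	/* One load issued: try to pair a second load with it.  */
      /* pair_another_load ();  */
      break;
    case -1:	/* One store issued: try to pair an adjacent store with it.  */
      /* pair_adjacent_store ();  */
      break;
    case 2:	/* Two loads issued: boost the first ready store.  */
      /* boost_first_store ();  */
      break;
    case -2:	/* Two stores issued: boost the first ready load.  */
      /* boost_first_load ();  */
      break;
    default:	/* |pendulum| > 2: do nothing.  */
      break;
    }
}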
31818 /* Return whether the presence of INSN causes a dispatch group termination
31819 of group WHICH_GROUP.
31821 If WHICH_GROUP == current_group, this function will return true if INSN
31822 causes the termination of the current group (i.e., the dispatch group to
31823 which INSN belongs). This means that INSN will be the last insn in the
31824 group it belongs to.
31826 If WHICH_GROUP == previous_group, this function will return true if INSN
31827 causes the termination of the previous group (i.e., the dispatch group that
31828 precedes the group to which INSN belongs). This means that INSN will be
31829 the first insn in the group it belongs to.  */
31832 insn_terminates_group_p (rtx_insn
*insn
, enum group_termination which_group
)
31839 first
= insn_must_be_first_in_group (insn
);
31840 last
= insn_must_be_last_in_group (insn
);
31845 if (which_group
== current_group
)
31847 else if (which_group
== previous_group
)
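/* Hedged sketch (added for illustration, not original code): the predicate
   documented above reduces, in simplified form, to the two helper queries
   defined below.  An insn terminates the current group iff it must be last
   in its group, and terminates the previous group iff it must be first in
   its own group.  The enum values current_group/previous_group are the ones
   already used above; the real function also bails out early when the CPU
   does not schedule in dispatch groups.  */
static bool
terminates_group_sketch (bool must_be_first, bool must_be_last,
			 enum group_termination which_group)
{
  if (which_group == current_group)
    return must_be_last;
  if (which_group == previous_group)
    return must_be_first;
  return false;
}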
31855 insn_must_be_first_in_group (rtx_insn
*insn
)
31857 enum attr_type type
;
31861 || DEBUG_INSN_P (insn
)
31862 || GET_CODE (PATTERN (insn
)) == USE
31863 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31866 switch (rs6000_cpu
)
31868 case PROCESSOR_POWER5
:
31869 if (is_cracked_insn (insn
))
31872 case PROCESSOR_POWER4
:
31873 if (is_microcoded_insn (insn
))
31876 if (!rs6000_sched_groups
)
31879 type
= get_attr_type (insn
);
31886 case TYPE_DELAYED_CR
:
31887 case TYPE_CR_LOGICAL
:
31900 case PROCESSOR_POWER6
:
31901 type
= get_attr_type (insn
);
31910 case TYPE_FPCOMPARE
:
31921 if (get_attr_dot (insn
) == DOT_NO
31922 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31927 if (get_attr_size (insn
) == SIZE_32
)
31935 if (get_attr_update (insn
) == UPDATE_YES
)
31943 case PROCESSOR_POWER7
:
31944 type
= get_attr_type (insn
);
31948 case TYPE_CR_LOGICAL
:
31962 if (get_attr_dot (insn
) == DOT_YES
)
31967 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31968 || get_attr_update (insn
) == UPDATE_YES
)
31975 if (get_attr_update (insn
) == UPDATE_YES
)
31983 case PROCESSOR_POWER8
:
31984 type
= get_attr_type (insn
);
31988 case TYPE_CR_LOGICAL
:
31989 case TYPE_DELAYED_CR
:
31997 case TYPE_VECSTORE
:
32004 if (get_attr_dot (insn
) == DOT_YES
)
32009 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
32010 || get_attr_update (insn
) == UPDATE_YES
)
32015 if (get_attr_update (insn
) == UPDATE_YES
32016 && get_attr_indexed (insn
) == INDEXED_YES
)
32032 insn_must_be_last_in_group (rtx_insn
*insn
)
32034 enum attr_type type
;
32038 || DEBUG_INSN_P (insn
)
32039 || GET_CODE (PATTERN (insn
)) == USE
32040 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
32043 switch (rs6000_cpu
) {
32044 case PROCESSOR_POWER4
:
32045 case PROCESSOR_POWER5
:
32046 if (is_microcoded_insn (insn
))
32049 if (is_branch_slot_insn (insn
))
32053 case PROCESSOR_POWER6
:
32054 type
= get_attr_type (insn
);
32062 case TYPE_FPCOMPARE
:
32073 if (get_attr_dot (insn
) == DOT_NO
32074 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
32079 if (get_attr_size (insn
) == SIZE_32
)
32087 case PROCESSOR_POWER7
:
32088 type
= get_attr_type (insn
);
32098 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
32099 && get_attr_update (insn
) == UPDATE_YES
)
32104 if (get_attr_update (insn
) == UPDATE_YES
32105 && get_attr_indexed (insn
) == INDEXED_YES
)
32113 case PROCESSOR_POWER8
:
32114 type
= get_attr_type (insn
);
32126 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
32127 && get_attr_update (insn
) == UPDATE_YES
)
32132 if (get_attr_update (insn
) == UPDATE_YES
32133 && get_attr_indexed (insn
) == INDEXED_YES
)
32148 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32149 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32152 is_costly_group (rtx
*group_insns
, rtx next_insn
)
32155 int issue_rate
= rs6000_issue_rate ();
32157 for (i
= 0; i
< issue_rate
; i
++)
32159 sd_iterator_def sd_it
;
32161 rtx insn
= group_insns
[i
];
32166 FOR_EACH_DEP (insn
, SD_LIST_RES_FORW
, sd_it
, dep
)
32168 rtx next
= DEP_CON (dep
);
32170 if (next
== next_insn
32171 && rs6000_is_costly_dependence (dep
, dep_cost (dep
), 0))
32179 /* Utility of the function redefine_groups.
32180 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32181 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32182 to keep it "far" (in a separate group) from GROUP_INSNS, following
32183 one of the following schemes, depending on the value of the flag
32184 -minsert-sched-nops = X:
32185 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32186 in order to force NEXT_INSN into a separate group.
32187 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32188 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32189 insertion (has a group just ended, how many vacant issue slots remain in the
32190 last group, and how many dispatch groups were encountered so far). */
32193 force_new_group (int sched_verbose
, FILE *dump
, rtx
*group_insns
,
32194 rtx_insn
*next_insn
, bool *group_end
, int can_issue_more
,
32199 int issue_rate
= rs6000_issue_rate ();
32200 bool end
= *group_end
;
32203 if (next_insn
== NULL_RTX
|| DEBUG_INSN_P (next_insn
))
32204 return can_issue_more
;
32206 if (rs6000_sched_insert_nops
> sched_finish_regroup_exact
)
32207 return can_issue_more
;
32209 force
= is_costly_group (group_insns
, next_insn
);
32211 return can_issue_more
;
32213 if (sched_verbose
> 6)
32214 fprintf (dump
,"force: group count = %d, can_issue_more = %d\n",
32215 *group_count
,can_issue_more
);
32217 if (rs6000_sched_insert_nops
== sched_finish_regroup_exact
)
32220 can_issue_more
= 0;
32222 /* Since only a branch can be issued in the last issue_slot, it is
32223 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32224 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32225 in this case the last nop will start a new group and the branch
32226 will be forced to the new group. */
32227 if (can_issue_more
&& !is_branch_slot_insn (next_insn
))
32230 /* Do we have a special group ending nop? */
32231 if (rs6000_cpu_attr
== CPU_POWER6
|| rs6000_cpu_attr
== CPU_POWER7
32232 || rs6000_cpu_attr
== CPU_POWER8
)
32234 nop
= gen_group_ending_nop ();
32235 emit_insn_before (nop
, next_insn
);
32236 can_issue_more
= 0;
32239 while (can_issue_more
> 0)
32242 emit_insn_before (nop
, next_insn
);
32250 if (rs6000_sched_insert_nops
< sched_finish_regroup_exact
)
32252 int n_nops
= rs6000_sched_insert_nops
;
32254 /* Nops can't be issued from the branch slot, so the effective
32255 issue_rate for nops is 'issue_rate - 1'. */
32256 if (can_issue_more
== 0)
32257 can_issue_more
= issue_rate
;
32259 if (can_issue_more
== 0)
32261 can_issue_more
= issue_rate
- 1;
32264 for (i
= 0; i
< issue_rate
; i
++)
32266 group_insns
[i
] = 0;
32273 emit_insn_before (nop
, next_insn
);
32274 if (can_issue_more
== issue_rate
- 1) /* new group begins */
32277 if (can_issue_more
== 0)
32279 can_issue_more
= issue_rate
- 1;
32282 for (i
= 0; i
< issue_rate
; i
++)
32284 group_insns
[i
] = 0;
32290 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32293 /* Is next_insn going to start a new group? */
32296 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
32297 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
32298 || (can_issue_more
< issue_rate
&&
32299 insn_terminates_group_p (next_insn
, previous_group
)));
32300 if (*group_end
&& end
)
32303 if (sched_verbose
> 6)
32304 fprintf (dump
, "done force: group count = %d, can_issue_more = %d\n",
32305 *group_count
, can_issue_more
);
32306 return can_issue_more
;
32309 return can_issue_more
;
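/* A small sketch (added; illustrative only) of the nop-count rule used by
   force_new_group above when -minsert-sched-nops = sched_finish_regroup_exact:
   a non-branch next insn needs 'can_issue_more - 1' nops, while a branch
   needs 'can_issue_more' nops so that the last nop opens a new group and the
   branch is pushed into it.  This is a simplified model, not the code
   actually emitted above (which can also use the special group-ending nop).  */
static int
exact_regroup_nops_sketch (int can_issue_more, bool next_is_branch)
{
  if (can_issue_more <= 0)
    return 0;
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}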
32312 /* This function tries to synch the dispatch groups that the compiler "sees"
32313 with the dispatch groups that the processor dispatcher is expected to
32314 form in practice. It tries to achieve this synchronization by forcing the
32315 estimated processor grouping on the compiler (as opposed to the function
32316 'pad_groups' which tries to force the scheduler's grouping on the processor).
32318 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32319 examines the (estimated) dispatch groups that will be formed by the processor
32320 dispatcher. It marks these group boundaries to reflect the estimated
32321 processor grouping, overriding the grouping that the scheduler had marked.
32322 Depending on the value of the flag '-minsert-sched-nops' this function can
32323 force certain insns into separate groups or force a certain distance between
32324 them by inserting nops, for example, if there exists a "costly dependence" between them.
32327 The function estimates the group boundaries that the processor will form as
32328 follows: It keeps track of how many vacant issue slots are available after
32329 each insn.  A subsequent insn will start a new group if one of the following holds:
32331 - no more vacant issue slots remain in the current dispatch group.
32332 - only the last issue slot, which is the branch slot, is vacant, but the next
32333 insn is not a branch.
32334 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32335 which means that a cracked insn (which occupies two issue slots) can't be
32336 issued in this group.
32337 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32338 start a new group. */
32341 redefine_groups (FILE *dump
, int sched_verbose
, rtx_insn
*prev_head_insn
,
32344 rtx_insn
*insn
, *next_insn
;
32346 int can_issue_more
;
32349 int group_count
= 0;
32353 issue_rate
= rs6000_issue_rate ();
32354 group_insns
= XALLOCAVEC (rtx
, issue_rate
);
32355 for (i
= 0; i
< issue_rate
; i
++)
32357 group_insns
[i
] = 0;
32359 can_issue_more
= issue_rate
;
32361 insn
= get_next_active_insn (prev_head_insn
, tail
);
32364 while (insn
!= NULL_RTX
)
32366 slot
= (issue_rate
- can_issue_more
);
32367 group_insns
[slot
] = insn
;
32369 rs6000_variable_issue (dump
, sched_verbose
, insn
, can_issue_more
);
32370 if (insn_terminates_group_p (insn
, current_group
))
32371 can_issue_more
= 0;
32373 next_insn
= get_next_active_insn (insn
, tail
);
32374 if (next_insn
== NULL_RTX
)
32375 return group_count
+ 1;
32377 /* Is next_insn going to start a new group? */
32379 = (can_issue_more
== 0
32380 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
32381 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
32382 || (can_issue_more
< issue_rate
&&
32383 insn_terminates_group_p (next_insn
, previous_group
)));
32385 can_issue_more
= force_new_group (sched_verbose
, dump
, group_insns
,
32386 next_insn
, &group_end
, can_issue_more
,
32392 can_issue_more
= 0;
32393 for (i
= 0; i
< issue_rate
; i
++)
32395 group_insns
[i
] = 0;
32399 if (GET_MODE (next_insn
) == TImode
&& can_issue_more
)
32400 PUT_MODE (next_insn
, VOIDmode
);
32401 else if (!can_issue_more
&& GET_MODE (next_insn
) != TImode
)
32402 PUT_MODE (next_insn
, TImode
);
32405 if (can_issue_more
== 0)
32406 can_issue_more
= issue_rate
;
32409 return group_count
;
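/* A compact restatement (added sketch, not original code) of the four
   group-start conditions listed before redefine_groups above; the function
   computes the same boolean inline when deciding whether next_insn starts a
   new dispatch group.  */
static bool
starts_new_group_sketch (int can_issue_more, int issue_rate,
			 rtx_insn *next_insn)
{
  return (can_issue_more == 0
	  || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	  || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	  || (can_issue_more < issue_rate
	      && insn_terminates_group_p (next_insn, previous_group)));
}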
32412 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32413 dispatch group boundaries that the scheduler had marked. Pad with nops
32414 any dispatch groups which have vacant issue slots, in order to force the
32415 scheduler's grouping on the processor dispatcher. The function
32416 returns the number of dispatch groups found. */
32419 pad_groups (FILE *dump
, int sched_verbose
, rtx_insn
*prev_head_insn
,
32422 rtx_insn
*insn
, *next_insn
;
32425 int can_issue_more
;
32427 int group_count
= 0;
32429 /* Initialize issue_rate. */
32430 issue_rate
= rs6000_issue_rate ();
32431 can_issue_more
= issue_rate
;
32433 insn
= get_next_active_insn (prev_head_insn
, tail
);
32434 next_insn
= get_next_active_insn (insn
, tail
);
32436 while (insn
!= NULL_RTX
)
32439 rs6000_variable_issue (dump
, sched_verbose
, insn
, can_issue_more
);
32441 group_end
= (next_insn
== NULL_RTX
|| GET_MODE (next_insn
) == TImode
);
32443 if (next_insn
== NULL_RTX
)
32448 /* If the scheduler had marked group termination at this location
32449 (between insn and next_insn), and neither insn nor next_insn will
32450 force group termination, pad the group with nops to force group termination.  */
32453 && (rs6000_sched_insert_nops
== sched_finish_pad_groups
)
32454 && !insn_terminates_group_p (insn
, current_group
)
32455 && !insn_terminates_group_p (next_insn
, previous_group
))
32457 if (!is_branch_slot_insn (next_insn
))
32460 while (can_issue_more
)
32463 emit_insn_before (nop
, next_insn
);
32468 can_issue_more
= issue_rate
;
32473 next_insn
= get_next_active_insn (insn
, tail
);
32476 return group_count
;
32479 /* We're beginning a new block. Initialize data structures as necessary. */
32482 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED
,
32483 int sched_verbose ATTRIBUTE_UNUSED
,
32484 int max_ready ATTRIBUTE_UNUSED
)
32486 last_scheduled_insn
= NULL
;
32487 load_store_pendulum
= 0;
32492 /* The following function is called at the end of scheduling BB.
32493 After reload, it inserts nops to enforce insn group bundling.  */
32496 rs6000_sched_finish (FILE *dump
, int sched_verbose
)
32501 fprintf (dump
, "=== Finishing schedule.\n");
32503 if (reload_completed
&& rs6000_sched_groups
)
32505 /* Do not run sched_finish hook when selective scheduling enabled. */
32506 if (sel_sched_p ())
32509 if (rs6000_sched_insert_nops
== sched_finish_none
)
32512 if (rs6000_sched_insert_nops
== sched_finish_pad_groups
)
32513 n_groups
= pad_groups (dump
, sched_verbose
,
32514 current_sched_info
->prev_head
,
32515 current_sched_info
->next_tail
);
32517 n_groups
= redefine_groups (dump
, sched_verbose
,
32518 current_sched_info
->prev_head
,
32519 current_sched_info
->next_tail
);
32521 if (sched_verbose
>= 6)
32523 fprintf (dump
, "ngroups = %d\n", n_groups
);
32524 print_rtl (dump
, current_sched_info
->prev_head
);
32525 fprintf (dump
, "Done finish_sched\n");
32530 struct rs6000_sched_context
32532 short cached_can_issue_more
;
32533 rtx_insn
*last_scheduled_insn
;
32534 int load_store_pendulum
;
32539 typedef struct rs6000_sched_context rs6000_sched_context_def
;
32540 typedef rs6000_sched_context_def
*rs6000_sched_context_t
;
32542 /* Allocate store for new scheduling context. */
32544 rs6000_alloc_sched_context (void)
32546 return xmalloc (sizeof (rs6000_sched_context_def
));
32549 /* If CLEAN_P is true, initialize _SC with clean data;
32550 otherwise initialize it from the global context.  */
32552 rs6000_init_sched_context (void *_sc
, bool clean_p
)
32554 rs6000_sched_context_t sc
= (rs6000_sched_context_t
) _sc
;
32558 sc
->cached_can_issue_more
= 0;
32559 sc
->last_scheduled_insn
= NULL
;
32560 sc
->load_store_pendulum
= 0;
32561 sc
->divide_cnt
= 0;
32562 sc
->vec_pairing
= 0;
32566 sc
->cached_can_issue_more
= cached_can_issue_more
;
32567 sc
->last_scheduled_insn
= last_scheduled_insn
;
32568 sc
->load_store_pendulum
= load_store_pendulum
;
32569 sc
->divide_cnt
= divide_cnt
;
32570 sc
->vec_pairing
= vec_pairing
;
32574 /* Sets the global scheduling context to the one pointed to by _SC. */
32576 rs6000_set_sched_context (void *_sc
)
32578 rs6000_sched_context_t sc
= (rs6000_sched_context_t
) _sc
;
32580 gcc_assert (sc
!= NULL
);
32582 cached_can_issue_more
= sc
->cached_can_issue_more
;
32583 last_scheduled_insn
= sc
->last_scheduled_insn
;
32584 load_store_pendulum
= sc
->load_store_pendulum
;
32585 divide_cnt
= sc
->divide_cnt
;
32586 vec_pairing
= sc
->vec_pairing
;
32591 rs6000_free_sched_context (void *_sc
)
32593 gcc_assert (_sc
!= NULL
);
32599 rs6000_sched_can_speculate_insn (rtx_insn
*insn
)
32601 switch (get_attr_type (insn
))
32616 /* Length in units of the trampoline for entering a nested function. */
32619 rs6000_trampoline_size (void)
32623 switch (DEFAULT_ABI
)
32626 gcc_unreachable ();
32629 ret
= (TARGET_32BIT
) ? 12 : 24;
32633 gcc_assert (!TARGET_32BIT
);
32639 ret
= (TARGET_32BIT
) ? 40 : 48;
32646 /* Emit RTL insns to initialize the variable parts of a trampoline.
32647 FNADDR is an RTX for the address of the function's pure code.
32648 CXT is an RTX for the static chain value for the function. */
32651 rs6000_trampoline_init (rtx m_tramp
, tree fndecl
, rtx cxt
)
32653 int regsize
= (TARGET_32BIT
) ? 4 : 8;
32654 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
32655 rtx ctx_reg
= force_reg (Pmode
, cxt
);
32656 rtx addr
= force_reg (Pmode
, XEXP (m_tramp
, 0));
32658 switch (DEFAULT_ABI
)
32661 gcc_unreachable ();
32663 /* Under AIX, just build the 3-word function descriptor.  */
32666 rtx fnmem
, fn_reg
, toc_reg
;
32668 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS
)
32669 error ("you cannot take the address of a nested function if you use "
32670 "the %qs option", "-mno-pointers-to-nested-functions");
32672 fnmem
= gen_const_mem (Pmode
, force_reg (Pmode
, fnaddr
));
32673 fn_reg
= gen_reg_rtx (Pmode
);
32674 toc_reg
= gen_reg_rtx (Pmode
);
32676 /* Macro to shorten the code expansions below. */
32677 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32679 m_tramp
= replace_equiv_address (m_tramp
, addr
);
32681 emit_move_insn (fn_reg
, MEM_PLUS (fnmem
, 0));
32682 emit_move_insn (toc_reg
, MEM_PLUS (fnmem
, regsize
));
32683 emit_move_insn (MEM_PLUS (m_tramp
, 0), fn_reg
);
32684 emit_move_insn (MEM_PLUS (m_tramp
, regsize
), toc_reg
);
32685 emit_move_insn (MEM_PLUS (m_tramp
, 2*regsize
), ctx_reg
);
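/* Added summary of the moves above (descriptive comment only):
     word 0: entry-point address copied from the source descriptor,
     word 1: TOC pointer copied from the source descriptor,
     word 2: static chain (CXT) for the nested function.  */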
32691 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32695 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__trampoline_setup"),
32696 LCT_NORMAL
, VOIDmode
,
32698 GEN_INT (rs6000_trampoline_size ()), SImode
,
32706 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32707 identifier as an argument, so the front end shouldn't look it up. */
32710 rs6000_attribute_takes_identifier_p (const_tree attr_id
)
32712 return is_attribute_p ("altivec", attr_id
);
32715 /* Handle the "altivec" attribute. The attribute may have
32716 arguments as follows:
32718 __attribute__((altivec(vector__)))
32719 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32720 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32722 and may appear more than once (e.g., 'vector bool char') in a
32723 given declaration. */
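/* Illustrative example (added; hedged): assuming the AltiVec keywords expand
   to this attribute in the forms listed above, a user-level declaration such
   as

       vector unsigned int v;

   reaches this handler roughly as

       __attribute__((altivec(vector__))) unsigned int v;

   and 'vector bool char' arrives as the bool__ form (on an unsigned char)
   followed by the vector__ form, which is why the attribute may be seen more
   than once per declaration.  */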
32726 rs6000_handle_altivec_attribute (tree
*node
,
32727 tree name ATTRIBUTE_UNUSED
,
32729 int flags ATTRIBUTE_UNUSED
,
32730 bool *no_add_attrs
)
32732 tree type
= *node
, result
= NULL_TREE
;
32736 = ((args
&& TREE_CODE (args
) == TREE_LIST
&& TREE_VALUE (args
)
32737 && TREE_CODE (TREE_VALUE (args
)) == IDENTIFIER_NODE
)
32738 ? *IDENTIFIER_POINTER (TREE_VALUE (args
))
32741 while (POINTER_TYPE_P (type
)
32742 || TREE_CODE (type
) == FUNCTION_TYPE
32743 || TREE_CODE (type
) == METHOD_TYPE
32744 || TREE_CODE (type
) == ARRAY_TYPE
)
32745 type
= TREE_TYPE (type
);
32747 mode
= TYPE_MODE (type
);
32749 /* Check for invalid AltiVec type qualifiers. */
32750 if (type
== long_double_type_node
)
32751 error ("use of %<long double%> in AltiVec types is invalid");
32752 else if (type
== boolean_type_node
)
32753 error ("use of boolean types in AltiVec types is invalid");
32754 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
32755 error ("use of %<complex%> in AltiVec types is invalid");
32756 else if (DECIMAL_FLOAT_MODE_P (mode
))
32757 error ("use of decimal floating point types in AltiVec types is invalid");
32758 else if (!TARGET_VSX
)
32760 if (type
== long_unsigned_type_node
|| type
== long_integer_type_node
)
32763 error ("use of %<long%> in AltiVec types is invalid for "
32764 "64-bit code without %qs", "-mvsx");
32765 else if (rs6000_warn_altivec_long
)
32766 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32769 else if (type
== long_long_unsigned_type_node
32770 || type
== long_long_integer_type_node
)
32771 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32773 else if (type
== double_type_node
)
32774 error ("use of %<double%> in AltiVec types is invalid without %qs",
32778 switch (altivec_type
)
32781 unsigned_p
= TYPE_UNSIGNED (type
);
32785 result
= (unsigned_p
? unsigned_V1TI_type_node
: V1TI_type_node
);
32788 result
= (unsigned_p
? unsigned_V2DI_type_node
: V2DI_type_node
);
32791 result
= (unsigned_p
? unsigned_V4SI_type_node
: V4SI_type_node
);
32794 result
= (unsigned_p
? unsigned_V8HI_type_node
: V8HI_type_node
);
32797 result
= (unsigned_p
? unsigned_V16QI_type_node
: V16QI_type_node
);
32799 case E_SFmode
: result
= V4SF_type_node
; break;
32800 case E_DFmode
: result
= V2DF_type_node
; break;
32801 /* If the user says 'vector int bool', we may be handed the 'bool'
32802 attribute _before_ the 'vector' attribute, and so select the
32803 proper type in the 'b' case below. */
32804 case E_V4SImode
: case E_V8HImode
: case E_V16QImode
: case E_V4SFmode
:
32805 case E_V2DImode
: case E_V2DFmode
:
32813 case E_DImode
: case E_V2DImode
: result
= bool_V2DI_type_node
; break;
32814 case E_SImode
: case E_V4SImode
: result
= bool_V4SI_type_node
; break;
32815 case E_HImode
: case E_V8HImode
: result
= bool_V8HI_type_node
; break;
32816 case E_QImode
: case E_V16QImode
: result
= bool_V16QI_type_node
;
32823 case E_V8HImode
: result
= pixel_V8HI_type_node
;
32829 /* Propagate qualifiers attached to the element type
32830 onto the vector type. */
32831 if (result
&& result
!= type
&& TYPE_QUALS (type
))
32832 result
= build_qualified_type (result
, TYPE_QUALS (type
));
32834 *no_add_attrs
= true; /* No need to hang on to the attribute. */
32837 *node
= lang_hooks
.types
.reconstruct_complex_type (*node
, result
);
32842 /* AltiVec defines four built-in scalar types that serve as vector
32843 elements; we must teach the compiler how to mangle them. */
32845 static const char *
32846 rs6000_mangle_type (const_tree type
)
32848 type
= TYPE_MAIN_VARIANT (type
);
32850 if (TREE_CODE (type
) != VOID_TYPE
&& TREE_CODE (type
) != BOOLEAN_TYPE
32851 && TREE_CODE (type
) != INTEGER_TYPE
&& TREE_CODE (type
) != REAL_TYPE
)
32854 if (type
== bool_char_type_node
) return "U6__boolc";
32855 if (type
== bool_short_type_node
) return "U6__bools";
32856 if (type
== pixel_type_node
) return "u7__pixel";
32857 if (type
== bool_int_type_node
) return "U6__booli";
32858 if (type
== bool_long_type_node
) return "U6__booll";
32860 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32861 "g" for IBM extended double, no matter whether it is long double (using
32862 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32863 if (TARGET_FLOAT128_TYPE
)
32865 if (type
== ieee128_float_type_node
)
32866 return "U10__float128";
32868 if (TARGET_LONG_DOUBLE_128
)
32870 if (type
== long_double_type_node
)
32871 return (TARGET_IEEEQUAD
) ? "U10__float128" : "g";
32873 if (type
== ibm128_float_type_node
)
32878 /* Mangle IBM extended float long double as `g' (__float128) on
32879 powerpc*-linux where long-double-64 previously was the default. */
32880 if (TYPE_MAIN_VARIANT (type
) == long_double_type_node
32882 && TARGET_LONG_DOUBLE_128
32883 && !TARGET_IEEEQUAD
)
32886 /* For all other types, use normal C++ mangling. */
32890 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32891 struct attribute_spec.handler. */
32894 rs6000_handle_longcall_attribute (tree
*node
, tree name
,
32895 tree args ATTRIBUTE_UNUSED
,
32896 int flags ATTRIBUTE_UNUSED
,
32897 bool *no_add_attrs
)
32899 if (TREE_CODE (*node
) != FUNCTION_TYPE
32900 && TREE_CODE (*node
) != FIELD_DECL
32901 && TREE_CODE (*node
) != TYPE_DECL
)
32903 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
32905 *no_add_attrs
= true;
32911 /* Set longcall attributes on all functions declared when
32912 rs6000_default_long_calls is true. */
32914 rs6000_set_default_type_attributes (tree type
)
32916 if (rs6000_default_long_calls
32917 && (TREE_CODE (type
) == FUNCTION_TYPE
32918 || TREE_CODE (type
) == METHOD_TYPE
))
32919 TYPE_ATTRIBUTES (type
) = tree_cons (get_identifier ("longcall"),
32921 TYPE_ATTRIBUTES (type
));
32924 darwin_set_default_type_attributes (type
);
32928 /* Return a reference suitable for calling a function with the
32929 longcall attribute. */
32932 rs6000_longcall_ref (rtx call_ref
)
32934 const char *call_name
;
32937 if (GET_CODE (call_ref
) != SYMBOL_REF
)
32940 /* System V adds '.' to the internal name, so skip them. */
32941 call_name
= XSTR (call_ref
, 0);
32942 if (*call_name
== '.')
32944 while (*call_name
== '.')
32947 node
= get_identifier (call_name
);
32948 call_ref
= gen_rtx_SYMBOL_REF (VOIDmode
, IDENTIFIER_POINTER (node
));
32951 return force_reg (Pmode
, call_ref
);
32954 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32955 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32958 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32959 struct attribute_spec.handler. */
32961 rs6000_handle_struct_attribute (tree
*node
, tree name
,
32962 tree args ATTRIBUTE_UNUSED
,
32963 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
32966 if (DECL_P (*node
))
32968 if (TREE_CODE (*node
) == TYPE_DECL
)
32969 type
= &TREE_TYPE (*node
);
32974 if (!(type
&& (TREE_CODE (*type
) == RECORD_TYPE
32975 || TREE_CODE (*type
) == UNION_TYPE
)))
32977 warning (OPT_Wattributes
, "%qE attribute ignored", name
);
32978 *no_add_attrs
= true;
32981 else if ((is_attribute_p ("ms_struct", name
)
32982 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type
)))
32983 || ((is_attribute_p ("gcc_struct", name
)
32984 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type
)))))
32986 warning (OPT_Wattributes
, "%qE incompatible attribute ignored",
32988 *no_add_attrs
= true;
32995 rs6000_ms_bitfield_layout_p (const_tree record_type
)
32997 return (TARGET_USE_MS_BITFIELD_LAYOUT
&&
32998 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type
)))
32999 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type
));
33002 #ifdef USING_ELFOS_H
33004 /* A get_unnamed_section callback, used for switching to toc_section. */
33007 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
33009 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33010 && TARGET_MINIMAL_TOC
)
33012 if (!toc_initialized
)
33014 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
33015 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33016 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "LCTOC", 0);
33017 fprintf (asm_out_file
, "\t.tc ");
33018 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1[TC],");
33019 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
33020 fprintf (asm_out_file
, "\n");
33022 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33023 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33024 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
33025 fprintf (asm_out_file
, " = .+32768\n");
33026 toc_initialized
= 1;
33029 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33031 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33033 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
33034 if (!toc_initialized
)
33036 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33037 toc_initialized
= 1;
33042 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33043 if (!toc_initialized
)
33045 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
33046 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
33047 fprintf (asm_out_file
, " = .+32768\n");
33048 toc_initialized
= 1;
33053 /* Implement TARGET_ASM_INIT_SECTIONS. */
33056 rs6000_elf_asm_init_sections (void)
33059 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op
, NULL
);
33062 = get_unnamed_section (SECTION_WRITE
, output_section_asm_op
,
33063 SDATA2_SECTION_ASM_OP
);
33066 /* Implement TARGET_SELECT_RTX_SECTION. */
33069 rs6000_elf_select_rtx_section (machine_mode mode
, rtx x
,
33070 unsigned HOST_WIDE_INT align
)
33072 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
33073 return toc_section
;
33075 return default_elf_select_rtx_section (mode
, x
, align
);
33078 /* For a SYMBOL_REF, set generic flags and then perform some
33079 target-specific processing.
33081 When the AIX ABI is requested on a non-AIX system, replace the
33082 function name with the real name (with a leading .) rather than the
33083 function descriptor name. This saves a lot of overriding code to
33084 read the prefixes. */
33086 static void rs6000_elf_encode_section_info (tree
, rtx
, int) ATTRIBUTE_UNUSED
;
33088 rs6000_elf_encode_section_info (tree decl
, rtx rtl
, int first
)
33090 default_encode_section_info (decl
, rtl
, first
);
33093 && TREE_CODE (decl
) == FUNCTION_DECL
33095 && DEFAULT_ABI
== ABI_AIX
)
33097 rtx sym_ref
= XEXP (rtl
, 0);
33098 size_t len
= strlen (XSTR (sym_ref
, 0));
33099 char *str
= XALLOCAVEC (char, len
+ 2);
33101 memcpy (str
+ 1, XSTR (sym_ref
, 0), len
+ 1);
33102 XSTR (sym_ref
, 0) = ggc_alloc_string (str
, len
+ 1);
33107 compare_section_name (const char *section
, const char *templ
)
33111 len
= strlen (templ
);
33112 return (strncmp (section
, templ
, len
) == 0
33113 && (section
[len
] == 0 || section
[len
] == '.'));
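/* Illustrative examples (added) of the matching rule implemented above:
     compare_section_name (".sdata",     ".sdata") => true
     compare_section_name (".sdata.foo", ".sdata") => true   (next char is '.')
     compare_section_name (".sdata2",    ".sdata") => false  (next char is '2')  */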
33117 rs6000_elf_in_small_data_p (const_tree decl
)
33119 if (rs6000_sdata
== SDATA_NONE
)
33122 /* We want to merge strings, so we never consider them small data. */
33123 if (TREE_CODE (decl
) == STRING_CST
)
33126 /* Functions are never in the small data area. */
33127 if (TREE_CODE (decl
) == FUNCTION_DECL
)
33130 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_SECTION_NAME (decl
))
33132 const char *section
= DECL_SECTION_NAME (decl
);
33133 if (compare_section_name (section
, ".sdata")
33134 || compare_section_name (section
, ".sdata2")
33135 || compare_section_name (section
, ".gnu.linkonce.s")
33136 || compare_section_name (section
, ".sbss")
33137 || compare_section_name (section
, ".sbss2")
33138 || compare_section_name (section
, ".gnu.linkonce.sb")
33139 || strcmp (section
, ".PPC.EMB.sdata0") == 0
33140 || strcmp (section
, ".PPC.EMB.sbss0") == 0)
33145 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (decl
));
33148 && size
<= g_switch_value
33149 /* If it's not public, and we're not going to reference it there,
33150 there's no need to put it in the small data section. */
33151 && (rs6000_sdata
!= SDATA_DATA
|| TREE_PUBLIC (decl
)))
33158 #endif /* USING_ELFOS_H */
33160 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33163 rs6000_use_blocks_for_constant_p (machine_mode mode
, const_rtx x
)
33165 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
);
33168 /* Do not place thread-local symbols refs in the object blocks. */
33171 rs6000_use_blocks_for_decl_p (const_tree decl
)
33173 return !DECL_THREAD_LOCAL_P (decl
);
33176 /* Return a REG that occurs in ADDR with coefficient 1.
33177 ADDR can be effectively incremented by incrementing REG.
33179 r0 is special and we must not select it as an address
33180 register by this routine since our caller will try to
33181 increment the returned register via an "la" instruction. */
33184 find_addr_reg (rtx addr
)
33186 while (GET_CODE (addr
) == PLUS
)
33188 if (GET_CODE (XEXP (addr
, 0)) == REG
33189 && REGNO (XEXP (addr
, 0)) != 0)
33190 addr
= XEXP (addr
, 0);
33191 else if (GET_CODE (XEXP (addr
, 1)) == REG
33192 && REGNO (XEXP (addr
, 1)) != 0)
33193 addr
= XEXP (addr
, 1);
33194 else if (CONSTANT_P (XEXP (addr
, 0)))
33195 addr
= XEXP (addr
, 1);
33196 else if (CONSTANT_P (XEXP (addr
, 1)))
33197 addr
= XEXP (addr
, 0);
33199 gcc_unreachable ();
33201 gcc_assert (GET_CODE (addr
) == REG
&& REGNO (addr
) != 0);
33206 rs6000_fatal_bad_address (rtx op
)
33208 fatal_insn ("bad address", op
);
33213 typedef struct branch_island_d
{
33214 tree function_name
;
33220 static vec
<branch_island
, va_gc
> *branch_islands
;
33222 /* Remember to generate a branch island for far calls to the given function.  */
33226 add_compiler_branch_island (tree label_name
, tree function_name
,
33229 branch_island bi
= {function_name
, label_name
, line_number
};
33230 vec_safe_push (branch_islands
, bi
);
33233 /* Generate far-jump branch islands for everything recorded in
33234 branch_islands. Invoked immediately after the last instruction of
33235 the epilogue has been emitted; the branch islands must be appended
33236 to, and contiguous with, the function body. Mach-O stubs are
33237 generated in machopic_output_stub(). */
33240 macho_branch_islands (void)
33244 while (!vec_safe_is_empty (branch_islands
))
33246 branch_island
*bi
= &branch_islands
->last ();
33247 const char *label
= IDENTIFIER_POINTER (bi
->label_name
);
33248 const char *name
= IDENTIFIER_POINTER (bi
->function_name
);
33249 char name_buf
[512];
33250 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33251 if (name
[0] == '*' || name
[0] == '&')
33252 strcpy (name_buf
, name
+1);
33256 strcpy (name_buf
+1, name
);
33258 strcpy (tmp_buf
, "\n");
33259 strcat (tmp_buf
, label
);
33260 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33261 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33262 dbxout_stabd (N_SLINE
, bi
->line_number
);
33263 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33266 if (TARGET_LINK_STACK
)
33269 get_ppc476_thunk_name (name
);
33270 strcat (tmp_buf
, ":\n\tmflr r0\n\tbl ");
33271 strcat (tmp_buf
, name
);
33272 strcat (tmp_buf
, "\n");
33273 strcat (tmp_buf
, label
);
33274 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
33278 strcat (tmp_buf
, ":\n\tmflr r0\n\tbcl 20,31,");
33279 strcat (tmp_buf
, label
);
33280 strcat (tmp_buf
, "_pic\n");
33281 strcat (tmp_buf
, label
);
33282 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
33285 strcat (tmp_buf
, "\taddis r11,r11,ha16(");
33286 strcat (tmp_buf
, name_buf
);
33287 strcat (tmp_buf
, " - ");
33288 strcat (tmp_buf
, label
);
33289 strcat (tmp_buf
, "_pic)\n");
33291 strcat (tmp_buf
, "\tmtlr r0\n");
33293 strcat (tmp_buf
, "\taddi r12,r11,lo16(");
33294 strcat (tmp_buf
, name_buf
);
33295 strcat (tmp_buf
, " - ");
33296 strcat (tmp_buf
, label
);
33297 strcat (tmp_buf
, "_pic)\n");
33299 strcat (tmp_buf
, "\tmtctr r12\n\tbctr\n");
33303 strcat (tmp_buf
, ":\nlis r12,hi16(");
33304 strcat (tmp_buf
, name_buf
);
33305 strcat (tmp_buf
, ")\n\tori r12,r12,lo16(");
33306 strcat (tmp_buf
, name_buf
);
33307 strcat (tmp_buf
, ")\n\tmtctr r12\n\tbctr");
33309 output_asm_insn (tmp_buf
, 0);
33310 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33311 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33312 dbxout_stabd (N_SLINE
, bi
->line_number
);
33313 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33314 branch_islands
->pop ();
33318 /* NO_PREVIOUS_DEF checks in the linked list whether the function name is
33319 already there or not. */
33322 no_previous_def (tree function_name
)
33327 FOR_EACH_VEC_SAFE_ELT (branch_islands
, ix
, bi
)
33328 if (function_name
== bi
->function_name
)
33333 /* GET_PREV_LABEL gets the label name from the previous definition of the function.  */
33337 get_prev_label (tree function_name
)
33342 FOR_EACH_VEC_SAFE_ELT (branch_islands
, ix
, bi
)
33343 if (function_name
== bi
->function_name
)
33344 return bi
->label_name
;
33348 /* INSN is either a function call or a millicode call. It may have an
33349 unconditional jump in its delay slot.
33351 CALL_DEST is the routine we are calling. */
33354 output_call (rtx_insn
*insn
, rtx
*operands
, int dest_operand_number
,
33355 int cookie_operand_number
)
33357 static char buf
[256];
33358 if (darwin_emit_branch_islands
33359 && GET_CODE (operands
[dest_operand_number
]) == SYMBOL_REF
33360 && (INTVAL (operands
[cookie_operand_number
]) & CALL_LONG
))
33363 tree funname
= get_identifier (XSTR (operands
[dest_operand_number
], 0));
33365 if (no_previous_def (funname
))
33367 rtx label_rtx
= gen_label_rtx ();
33368 char *label_buf
, temp_buf
[256];
33369 ASM_GENERATE_INTERNAL_LABEL (temp_buf
, "L",
33370 CODE_LABEL_NUMBER (label_rtx
));
33371 label_buf
= temp_buf
[0] == '*' ? temp_buf
+ 1 : temp_buf
;
33372 labelname
= get_identifier (label_buf
);
33373 add_compiler_branch_island (labelname
, funname
, insn_line (insn
));
33376 labelname
= get_prev_label (funname
);
33378 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33379 instruction will reach 'foo', otherwise link as 'bl L42'".
33380 "L42" should be a 'branch island', that will do a far jump to
33381 'foo'. Branch islands are generated in
33382 macho_branch_islands(). */
33383 sprintf (buf
, "jbsr %%z%d,%.246s",
33384 dest_operand_number
, IDENTIFIER_POINTER (labelname
));
33387 sprintf (buf
, "bl %%z%d", dest_operand_number
);
33391 /* Generate PIC and indirect symbol stubs. */
33394 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
33396 unsigned int length
;
33397 char *symbol_name
, *lazy_ptr_name
;
33398 char *local_label_0
;
33399 static int label
= 0;
33401 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33402 symb
= (*targetm
.strip_name_encoding
) (symb
);
33405 length
= strlen (symb
);
33406 symbol_name
= XALLOCAVEC (char, length
+ 32);
33407 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
33409 lazy_ptr_name
= XALLOCAVEC (char, length
+ 32);
33410 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name
, symb
, length
);
33413 switch_to_section (darwin_sections
[machopic_picsymbol_stub1_section
]);
33415 switch_to_section (darwin_sections
[machopic_symbol_stub1_section
]);
33419 fprintf (file
, "\t.align 5\n");
33421 fprintf (file
, "%s:\n", stub
);
33422 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33425 local_label_0
= XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33426 sprintf (local_label_0
, "\"L%011d$spb\"", label
);
33428 fprintf (file
, "\tmflr r0\n");
33429 if (TARGET_LINK_STACK
)
33432 get_ppc476_thunk_name (name
);
33433 fprintf (file
, "\tbl %s\n", name
);
33434 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33438 fprintf (file
, "\tbcl 20,31,%s\n", local_label_0
);
33439 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33441 fprintf (file
, "\taddis r11,r11,ha16(%s-%s)\n",
33442 lazy_ptr_name
, local_label_0
);
33443 fprintf (file
, "\tmtlr r0\n");
33444 fprintf (file
, "\t%s r12,lo16(%s-%s)(r11)\n",
33445 (TARGET_64BIT
? "ldu" : "lwzu"),
33446 lazy_ptr_name
, local_label_0
);
33447 fprintf (file
, "\tmtctr r12\n");
33448 fprintf (file
, "\tbctr\n");
33452 fprintf (file
, "\t.align 4\n");
33454 fprintf (file
, "%s:\n", stub
);
33455 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33457 fprintf (file
, "\tlis r11,ha16(%s)\n", lazy_ptr_name
);
33458 fprintf (file
, "\t%s r12,lo16(%s)(r11)\n",
33459 (TARGET_64BIT
? "ldu" : "lwzu"),
33461 fprintf (file
, "\tmtctr r12\n");
33462 fprintf (file
, "\tbctr\n");
33465 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr_section
]);
33466 fprintf (file
, "%s:\n", lazy_ptr_name
);
33467 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33468 fprintf (file
, "%sdyld_stub_binding_helper\n",
33469 (TARGET_64BIT
? DOUBLE_INT_ASM_OP
: "\t.long\t"));
33472 /* Legitimize PIC addresses. If the address is already
33473 position-independent, we return ORIG. Newly generated
33474 position-independent addresses go into a reg.  This is REG if
33475 nonzero, otherwise we allocate register(s) as necessary. */
33477 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
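/* Added note (sketch): SMALL_INT accepts exactly the CONST_INT values that
   fit in a signed 16-bit displacement; adding 0x8000 maps [-0x8000, 0x7fff]
   onto [0, 0xffff].  A standalone model of the same check on a plain value
   (hypothetical helper, for illustration only):  */
static bool
fits_signed_16bit_sketch (unsigned HOST_WIDE_INT val)
{
  return (val + 0x8000) < 0x10000;
}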
33480 rs6000_machopic_legitimize_pic_address (rtx orig
, machine_mode mode
,
33485 if (reg
== NULL
&& !reload_completed
)
33486 reg
= gen_reg_rtx (Pmode
);
33488 if (GET_CODE (orig
) == CONST
)
33492 if (GET_CODE (XEXP (orig
, 0)) == PLUS
33493 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
33496 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
33498 /* Use a different reg for the intermediate value, as
33499 it will be marked UNCHANGING. */
33500 reg_temp
= !can_create_pseudo_p () ? reg
: gen_reg_rtx (Pmode
);
33501 base
= rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
33504 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
33507 if (GET_CODE (offset
) == CONST_INT
)
33509 if (SMALL_INT (offset
))
33510 return plus_constant (Pmode
, base
, INTVAL (offset
));
33511 else if (!reload_completed
)
33512 offset
= force_reg (Pmode
, offset
);
33515 rtx mem
= force_const_mem (Pmode
, orig
);
33516 return machopic_legitimize_pic_address (mem
, Pmode
, reg
);
33519 return gen_rtx_PLUS (Pmode
, base
, offset
);
33522 /* Fall back on generic machopic code. */
33523 return machopic_legitimize_pic_address (orig
, mode
, reg
);
33526 /* Output a .machine directive for the Darwin assembler, and call
33527 the generic start_file routine. */
33530 rs6000_darwin_file_start (void)
33532 static const struct
33536 HOST_WIDE_INT if_set
;
33538 { "ppc64", "ppc64", MASK_64BIT
},
33539 { "970", "ppc970", MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
33540 { "power4", "ppc970", 0 },
33541 { "G5", "ppc970", 0 },
33542 { "7450", "ppc7450", 0 },
33543 { "7400", "ppc7400", MASK_ALTIVEC
},
33544 { "G4", "ppc7400", 0 },
33545 { "750", "ppc750", 0 },
33546 { "740", "ppc750", 0 },
33547 { "G3", "ppc750", 0 },
33548 { "604e", "ppc604e", 0 },
33549 { "604", "ppc604", 0 },
33550 { "603e", "ppc603", 0 },
33551 { "603", "ppc603", 0 },
33552 { "601", "ppc601", 0 },
33553 { NULL
, "ppc", 0 } };
33554 const char *cpu_id
= "";
33557 rs6000_file_start ();
33558 darwin_file_start ();
33560 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33562 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
33563 cpu_id
= rs6000_default_cpu
;
33565 if (global_options_set
.x_rs6000_cpu_index
)
33566 cpu_id
= processor_target_table
[rs6000_cpu_index
].name
;
33568 /* Look through the mapping array. Pick the first name that either
33569 matches the argument, has a bit set in IF_SET that is also set
33570 in the target flags, or has a NULL name. */
33573 while (mapping
[i
].arg
!= NULL
33574 && strcmp (mapping
[i
].arg
, cpu_id
) != 0
33575 && (mapping
[i
].if_set
& rs6000_isa_flags
) == 0)
33578 fprintf (asm_out_file
, "\t.machine %s\n", mapping
[i
].name
);
33581 #endif /* TARGET_MACHO */
33585 rs6000_elf_reloc_rw_mask (void)
33589 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33595 /* Record an element in the table of global constructors. SYMBOL is
33596 a SYMBOL_REF of the function to be called; PRIORITY is a number
33597 between 0 and MAX_INIT_PRIORITY.
33599 This differs from default_named_section_asm_out_constructor in
33600 that we have special handling for -mrelocatable. */
33602 static void rs6000_elf_asm_out_constructor (rtx
, int) ATTRIBUTE_UNUSED
;
33604 rs6000_elf_asm_out_constructor (rtx symbol
, int priority
)
33606 const char *section
= ".ctors";
33609 if (priority
!= DEFAULT_INIT_PRIORITY
)
33611 sprintf (buf
, ".ctors.%.5u",
33612 /* Invert the numbering so the linker puts us in the proper
33613 order; constructors are run from right to left, and the
33614 linker sorts in increasing order. */
33615 MAX_INIT_PRIORITY
- priority
);
33619 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33620 assemble_align (POINTER_SIZE
);
33622 if (DEFAULT_ABI
== ABI_V4
33623 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33625 fputs ("\t.long (", asm_out_file
);
33626 output_addr_const (asm_out_file
, symbol
);
33627 fputs (")@fixup\n", asm_out_file
);
33630 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33633 static void rs6000_elf_asm_out_destructor (rtx
, int) ATTRIBUTE_UNUSED
;
33635 rs6000_elf_asm_out_destructor (rtx symbol
, int priority
)
33637 const char *section
= ".dtors";
33640 if (priority
!= DEFAULT_INIT_PRIORITY
)
33642 sprintf (buf
, ".dtors.%.5u",
33643 /* Invert the numbering so the linker puts us in the proper
33644 order; constructors are run from right to left, and the
33645 linker sorts in increasing order. */
33646 MAX_INIT_PRIORITY
- priority
);
33650 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33651 assemble_align (POINTER_SIZE
);
33653 if (DEFAULT_ABI
== ABI_V4
33654 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33656 fputs ("\t.long (", asm_out_file
);
33657 output_addr_const (asm_out_file
, symbol
);
33658 fputs (")@fixup\n", asm_out_file
);
33661 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33665 rs6000_elf_declare_function_name (FILE *file
, const char *name
, tree decl
)
33667 if (TARGET_64BIT
&& DEFAULT_ABI
!= ABI_ELFv2
)
33669 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file
);
33670 ASM_OUTPUT_LABEL (file
, name
);
33671 fputs (DOUBLE_INT_ASM_OP
, file
);
33672 rs6000_output_function_entry (file
, name
);
33673 fputs (",.TOC.@tocbase,0\n\t.previous\n", file
);
33676 fputs ("\t.size\t", file
);
33677 assemble_name (file
, name
);
33678 fputs (",24\n\t.type\t.", file
);
33679 assemble_name (file
, name
);
33680 fputs (",@function\n", file
);
33681 if (TREE_PUBLIC (decl
) && ! DECL_WEAK (decl
))
33683 fputs ("\t.globl\t.", file
);
33684 assemble_name (file
, name
);
33689 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33690 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33691 rs6000_output_function_entry (file
, name
);
33692 fputs (":\n", file
);
33697 if (DEFAULT_ABI
== ABI_V4
33698 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
33699 && !TARGET_SECURE_PLT
33700 && (!constant_pool_empty_p () || crtl
->profile
)
33701 && (uses_toc
= uses_TOC ()))
33706 switch_to_other_text_partition ();
33707 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33709 fprintf (file
, "\t.long ");
33710 assemble_name (file
, toc_label_name
);
33713 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33714 assemble_name (file
, buf
);
33717 switch_to_other_text_partition ();
33720 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33721 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33723 if (TARGET_CMODEL
== CMODEL_LARGE
&& rs6000_global_entry_point_needed_p ())
33727 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33729 fprintf (file
, "\t.quad .TOC.-");
33730 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33731 assemble_name (file
, buf
);
33735 if (DEFAULT_ABI
== ABI_AIX
)
33737 const char *desc_name
, *orig_name
;
33739 orig_name
= (*targetm
.strip_name_encoding
) (name
);
33740 desc_name
= orig_name
;
33741 while (*desc_name
== '.')
33744 if (TREE_PUBLIC (decl
))
33745 fprintf (file
, "\t.globl %s\n", desc_name
);
33747 fprintf (file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33748 fprintf (file
, "%s:\n", desc_name
);
33749 fprintf (file
, "\t.long %s\n", orig_name
);
33750 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file
);
33751 fputs ("\t.long 0\n", file
);
33752 fprintf (file
, "\t.previous\n");
33754 ASM_OUTPUT_LABEL (file
, name
);
33757 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED
;
33759 rs6000_elf_file_end (void)
33761 #ifdef HAVE_AS_GNU_ATTRIBUTE
33762 /* ??? The value emitted depends on options active at file end.
33763 Assume anyone using #pragma or attributes that might change
33764 options knows what they are doing. */
33765 if ((TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
33766 && rs6000_passes_float
)
33772 else if (TARGET_SF_FPR
)
33776 if (rs6000_passes_long_double
)
33778 if (!TARGET_LONG_DOUBLE_128
)
33780 else if (TARGET_IEEEQUAD
)
33785 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n", fp
);
33787 if (TARGET_32BIT
&& DEFAULT_ABI
== ABI_V4
)
33789 if (rs6000_passes_vector
)
33790 fprintf (asm_out_file
, "\t.gnu_attribute 8, %d\n",
33791 (TARGET_ALTIVEC_ABI
? 2 : 1));
33792 if (rs6000_returns_struct
)
33793 fprintf (asm_out_file
, "\t.gnu_attribute 12, %d\n",
33794 aix_struct_return
? 2 : 1);
33797 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33798 if (TARGET_32BIT
|| DEFAULT_ABI
== ABI_ELFv2
)
33799 file_end_indicate_exec_stack ();
33802 if (flag_split_stack
)
33803 file_end_indicate_split_stack ();
33807 /* We have expanded a CPU builtin, so we need to emit a reference to
33808 the special symbol that LIBC uses to declare it supports the
33809 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33810 switch_to_section (data_section
);
33811 fprintf (asm_out_file
, "\t.align %u\n", TARGET_32BIT
? 2 : 3);
33812 fprintf (asm_out_file
, "\t%s %s\n",
33813 TARGET_32BIT
? ".long" : ".quad", tcb_verification_symbol
);
33820 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33821 #define HAVE_XCOFF_DWARF_EXTRAS 0
33824 static enum unwind_info_type
33825 rs6000_xcoff_debug_unwind_info (void)
33831 rs6000_xcoff_asm_output_anchor (rtx symbol
)
33835 sprintf (buffer
, "$ + " HOST_WIDE_INT_PRINT_DEC
,
33836 SYMBOL_REF_BLOCK_OFFSET (symbol
));
33837 fprintf (asm_out_file
, "%s", SET_ASM_OP
);
33838 RS6000_OUTPUT_BASENAME (asm_out_file
, XSTR (symbol
, 0));
33839 fprintf (asm_out_file
, ",");
33840 RS6000_OUTPUT_BASENAME (asm_out_file
, buffer
);
33841 fprintf (asm_out_file
, "\n");
33845 rs6000_xcoff_asm_globalize_label (FILE *stream
, const char *name
)
33847 fputs (GLOBAL_ASM_OP
, stream
);
33848 RS6000_OUTPUT_BASENAME (stream
, name
);
33849 putc ('\n', stream
);
33852 /* A get_unnamed_decl callback, used for read-only sections. PTR
33853 points to the section string variable. */
33856 rs6000_xcoff_output_readonly_section_asm_op (const void *directive
)
33858 fprintf (asm_out_file
, "\t.csect %s[RO],%s\n",
33859 *(const char *const *) directive
,
33860 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33863 /* Likewise for read-write sections. */
33866 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive
)
33868 fprintf (asm_out_file
, "\t.csect %s[RW],%s\n",
33869 *(const char *const *) directive
,
33870 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33874 rs6000_xcoff_output_tls_section_asm_op (const void *directive
)
33876 fprintf (asm_out_file
, "\t.csect %s[TL],%s\n",
33877 *(const char *const *) directive
,
33878 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33881 /* A get_unnamed_section callback, used for switching to toc_section. */
33884 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
33886 if (TARGET_MINIMAL_TOC
)
33888 /* toc_section is always selected at least once from
33889 rs6000_xcoff_file_start, so this is guaranteed to
33890 always be defined once and only once in each file. */
33891 if (!toc_initialized
)
33893 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file
);
33894 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file
);
33895 toc_initialized
= 1;
33897 fprintf (asm_out_file
, "\t.csect toc_table[RW]%s\n",
33898 (TARGET_32BIT
? "" : ",3"));
33901 fputs ("\t.toc\n", asm_out_file
);
33904 /* Implement TARGET_ASM_INIT_SECTIONS. */
33907 rs6000_xcoff_asm_init_sections (void)
33909 read_only_data_section
33910 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33911 &xcoff_read_only_section_name
);
33913 private_data_section
33914 = get_unnamed_section (SECTION_WRITE
,
33915 rs6000_xcoff_output_readwrite_section_asm_op
,
33916 &xcoff_private_data_section_name
);
33919 = get_unnamed_section (SECTION_TLS
,
33920 rs6000_xcoff_output_tls_section_asm_op
,
33921 &xcoff_tls_data_section_name
);
33923 tls_private_data_section
33924 = get_unnamed_section (SECTION_TLS
,
33925 rs6000_xcoff_output_tls_section_asm_op
,
33926 &xcoff_private_data_section_name
);
33928 read_only_private_data_section
33929 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33930 &xcoff_private_data_section_name
);
33933 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op
, NULL
);
33935 readonly_data_section
= read_only_data_section
;
33939 rs6000_xcoff_reloc_rw_mask (void)
33945 rs6000_xcoff_asm_named_section (const char *name
, unsigned int flags
,
33946 tree decl ATTRIBUTE_UNUSED
)
33949 static const char * const suffix
[5] = { "PR", "RO", "RW", "TL", "XO" };
33951 if (flags
& SECTION_EXCLUDE
)
33953 else if (flags
& SECTION_DEBUG
)
33955 fprintf (asm_out_file
, "\t.dwsect %s\n", name
);
33958 else if (flags
& SECTION_CODE
)
33960 else if (flags
& SECTION_TLS
)
33962 else if (flags
& SECTION_WRITE
)
33967 fprintf (asm_out_file
, "\t.csect %s%s[%s],%u\n",
33968 (flags
& SECTION_CODE
) ? "." : "",
33969 name
, suffix
[smclass
], flags
& SECTION_ENTSIZE
);
33972 #define IN_NAMED_SECTION(DECL) \
33973 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33974 && DECL_SECTION_NAME (DECL) != NULL)
33977 rs6000_xcoff_select_section (tree decl
, int reloc
,
33978 unsigned HOST_WIDE_INT align
)
33980 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33982 if (align
> BIGGEST_ALIGNMENT
)
33984 resolve_unique_section (decl
, reloc
, true);
33985 if (IN_NAMED_SECTION (decl
))
33986 return get_named_section (decl
, NULL
, reloc
);
33989 if (decl_readonly_section (decl
, reloc
))
33991 if (TREE_PUBLIC (decl
))
33992 return read_only_data_section
;
33994 return read_only_private_data_section
;
33999 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
34001 if (TREE_PUBLIC (decl
))
34002 return tls_data_section
;
34003 else if (bss_initializer_p (decl
))
34005 /* Convert to COMMON to emit in BSS. */
34006 DECL_COMMON (decl
) = 1;
34007 return tls_comm_section
;
34010 return tls_private_data_section
;
34014 if (TREE_PUBLIC (decl
))
34015 return data_section
;
34017 return private_data_section
;
34022 rs6000_xcoff_unique_section (tree decl
, int reloc ATTRIBUTE_UNUSED
)
34026 /* Use select_section for private data and uninitialized data with
34027 alignment <= BIGGEST_ALIGNMENT. */
34028 if (!TREE_PUBLIC (decl
)
34029 || DECL_COMMON (decl
)
34030 || (DECL_INITIAL (decl
) == NULL_TREE
34031 && DECL_ALIGN (decl
) <= BIGGEST_ALIGNMENT
)
34032 || DECL_INITIAL (decl
) == error_mark_node
34033 || (flag_zero_initialized_in_bss
34034 && initializer_zerop (DECL_INITIAL (decl
))))
34037 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
34038 name
= (*targetm
.strip_name_encoding
) (name
);
34039 set_decl_section_name (decl
, name
);
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */
34049 rs6000_xcoff_select_rtx_section (machine_mode mode
, rtx x
,
34050 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED
)
34052 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
34053 return toc_section
;
34055 return read_only_private_data_section
;
34058 /* Remove any trailing [DS] or the like from the symbol name. */
34060 static const char *
34061 rs6000_xcoff_strip_name_encoding (const char *name
)
34066 len
= strlen (name
);
34067 if (name
[len
- 1] == ']')
34068 return ggc_alloc_string (name
, len
- 4);
34073 /* Section attributes. AIX is always PIC. */
34075 static unsigned int
34076 rs6000_xcoff_section_type_flags (tree decl
, const char *name
, int reloc
)
34078 unsigned int align
;
34079 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
34081 /* Align to at least UNIT size. */
34082 if ((flags
& SECTION_CODE
) != 0 || !decl
|| !DECL_P (decl
))
34083 align
= MIN_UNITS_PER_WORD
;
34085 /* Increase alignment of large objects if not already stricter. */
34086 align
= MAX ((DECL_ALIGN (decl
) / BITS_PER_UNIT
),
34087 int_size_in_bytes (TREE_TYPE (decl
)) > MIN_UNITS_PER_WORD
34088 ? UNITS_PER_FP_WORD
: MIN_UNITS_PER_WORD
);
34090 return flags
| (exact_log2 (align
) & SECTION_ENTSIZE
);
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */
34106 rs6000_xcoff_file_start (void)
34108 rs6000_gen_section_name (&xcoff_bss_section_name
,
34109 main_input_filename
, ".bss_");
34110 rs6000_gen_section_name (&xcoff_private_data_section_name
,
34111 main_input_filename
, ".rw_");
34112 rs6000_gen_section_name (&xcoff_read_only_section_name
,
34113 main_input_filename
, ".ro_");
34114 rs6000_gen_section_name (&xcoff_tls_data_section_name
,
34115 main_input_filename
, ".tls_");
34116 rs6000_gen_section_name (&xcoff_tbss_section_name
,
34117 main_input_filename
, ".tbss_[UL]");
34119 fputs ("\t.file\t", asm_out_file
);
34120 output_quoted_string (asm_out_file
, main_input_filename
);
34121 fputc ('\n', asm_out_file
);
34122 if (write_symbols
!= NO_DEBUG
)
34123 switch_to_section (private_data_section
);
34124 switch_to_section (toc_section
);
34125 switch_to_section (text_section
);
34127 fprintf (asm_out_file
, "\t.extern %s\n", RS6000_MCOUNT
);
34128 rs6000_file_start ();
34131 /* Output at end of assembler file.
34132 On the RS/6000, referencing data should automatically pull in text. */
34135 rs6000_xcoff_file_end (void)
34137 switch_to_section (text_section
);
34138 fputs ("_section_.text:\n", asm_out_file
);
34139 switch_to_section (data_section
);
34140 fputs (TARGET_32BIT
34141 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34145 struct declare_alias_data
34148 bool function_descriptor
;
34151 /* Declare alias N. A helper function for for_node_and_aliases. */
34154 rs6000_declare_alias (struct symtab_node
*n
, void *d
)
34156 struct declare_alias_data
*data
= (struct declare_alias_data
*)d
;
34157 /* Main symbol is output specially, because varasm machinery does part of
34158 the job for us - we do not need to declare .globl/lglobs and such. */
34159 if (!n
->alias
|| n
->weakref
)
34162 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n
->decl
)))
34165 /* Prevent assemble_alias from trying to use .set pseudo operation
34166 that does not behave as expected by the middle-end. */
34167 TREE_ASM_WRITTEN (n
->decl
) = true;
34169 const char *name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n
->decl
));
34170 char *buffer
= (char *) alloca (strlen (name
) + 2);
34172 int dollar_inside
= 0;
34174 strcpy (buffer
, name
);
34175 p
= strchr (buffer
, '$');
34179 p
= strchr (p
+ 1, '$');
34181 if (TREE_PUBLIC (n
->decl
))
34183 if (!RS6000_WEAK
|| !DECL_WEAK (n
->decl
))
34185 if (dollar_inside
) {
34186 if (data
->function_descriptor
)
34187 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
34188 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
34190 if (data
->function_descriptor
)
34192 fputs ("\t.globl .", data
->file
);
34193 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
34194 putc ('\n', data
->file
);
34196 fputs ("\t.globl ", data
->file
);
34197 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
34198 putc ('\n', data
->file
);
34200 #ifdef ASM_WEAKEN_DECL
34201 else if (DECL_WEAK (n
->decl
) && !data
->function_descriptor
)
34202 ASM_WEAKEN_DECL (data
->file
, n
->decl
, name
, NULL
);
34209 if (data
->function_descriptor
)
34210 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
34211 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
34213 if (data
->function_descriptor
)
34215 fputs ("\t.lglobl .", data
->file
);
34216 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
34217 putc ('\n', data
->file
);
34219 fputs ("\t.lglobl ", data
->file
);
34220 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
34221 putc ('\n', data
->file
);
34223 if (data
->function_descriptor
)
34224 fputs (".", data
->file
);
34225 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
34226 fputs (":\n", data
->file
);
34231 #ifdef HAVE_GAS_HIDDEN
34232 /* Helper function to calculate visibility of a DECL
34233 and return the value as a const string. */
34235 static const char *
34236 rs6000_xcoff_visibility (tree decl
)
34238 static const char * const visibility_types
[] = {
34239 "", ",protected", ",hidden", ",internal"
34242 enum symbol_visibility vis
= DECL_VISIBILITY (decl
);
34244 if (TREE_CODE (decl
) == FUNCTION_DECL
34245 && cgraph_node::get (decl
)
34246 && cgraph_node::get (decl
)->instrumentation_clone
34247 && cgraph_node::get (decl
)->instrumented_version
)
34248 vis
= DECL_VISIBILITY (cgraph_node::get (decl
)->instrumented_version
->decl
);
34250 return visibility_types
[vis
];
/* This macro produces the initial definition of a function name.
   On the RS/6000, we need to place an extra '.' in the function name and
   output the function descriptor.
   Dollar signs are converted to underscores.

   The csect for the function will have already been created when
   text_section was selected.  We do have to go back to that csect, however.

   The third and fourth parameters to the .function pseudo-op (16 and 044)
   are placeholders which no longer have any use.

   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */
34270 rs6000_xcoff_declare_function_name (FILE *file
, const char *name
, tree decl
)
34272 char *buffer
= (char *) alloca (strlen (name
) + 1);
34274 int dollar_inside
= 0;
34275 struct declare_alias_data data
= {file
, false};
34277 strcpy (buffer
, name
);
34278 p
= strchr (buffer
, '$');
34282 p
= strchr (p
+ 1, '$');
34284 if (TREE_PUBLIC (decl
))
34286 if (!RS6000_WEAK
|| !DECL_WEAK (decl
))
34288 if (dollar_inside
) {
34289 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
34290 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
34292 fputs ("\t.globl .", file
);
34293 RS6000_OUTPUT_BASENAME (file
, buffer
);
34294 #ifdef HAVE_GAS_HIDDEN
34295 fputs (rs6000_xcoff_visibility (decl
), file
);
34302 if (dollar_inside
) {
34303 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
34304 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
34306 fputs ("\t.lglobl .", file
);
34307 RS6000_OUTPUT_BASENAME (file
, buffer
);
34310 fputs ("\t.csect ", file
);
34311 RS6000_OUTPUT_BASENAME (file
, buffer
);
34312 fputs (TARGET_32BIT
? "[DS]\n" : "[DS],3\n", file
);
34313 RS6000_OUTPUT_BASENAME (file
, buffer
);
34314 fputs (":\n", file
);
34315 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
34317 fputs (TARGET_32BIT
? "\t.long ." : "\t.llong .", file
);
34318 RS6000_OUTPUT_BASENAME (file
, buffer
);
34319 fputs (", TOC[tc0], 0\n", file
);
34321 switch_to_section (function_section (decl
));
34323 RS6000_OUTPUT_BASENAME (file
, buffer
);
34324 fputs (":\n", file
);
34325 data
.function_descriptor
= true;
34326 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
34328 if (!DECL_IGNORED_P (decl
))
34330 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
34331 xcoffout_declare_function (file
, decl
, buffer
);
34332 else if (write_symbols
== DWARF2_DEBUG
)
34334 name
= (*targetm
.strip_name_encoding
) (name
);
34335 fprintf (file
, "\t.function .%s,.%s,2,0\n", name
, name
);
34342 /* Output assembly language to globalize a symbol from a DECL,
34343 possibly with visibility. */
34346 rs6000_xcoff_asm_globalize_decl_name (FILE *stream
, tree decl
)
34348 const char *name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
34349 fputs (GLOBAL_ASM_OP
, stream
);
34350 RS6000_OUTPUT_BASENAME (stream
, name
);
34351 #ifdef HAVE_GAS_HIDDEN
34352 fputs (rs6000_xcoff_visibility (decl
), stream
);
34354 putc ('\n', stream
);
34357 /* Output assembly language to define a symbol as COMMON from a DECL,
34358 possibly with visibility. */
34361 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream
,
34362 tree decl ATTRIBUTE_UNUSED
,
34364 unsigned HOST_WIDE_INT size
,
34365 unsigned HOST_WIDE_INT align
)
34367 unsigned HOST_WIDE_INT align2
= 2;
34370 align2
= floor_log2 (align
/ BITS_PER_UNIT
);
34374 fputs (COMMON_ASM_OP
, stream
);
34375 RS6000_OUTPUT_BASENAME (stream
, name
);
34378 "," HOST_WIDE_INT_PRINT_UNSIGNED
"," HOST_WIDE_INT_PRINT_UNSIGNED
,
34381 #ifdef HAVE_GAS_HIDDEN
34383 fputs (rs6000_xcoff_visibility (decl
), stream
);
34385 putc ('\n', stream
);
/* This macro produces the initial definition of an object (variable) name.
   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */
34393 rs6000_xcoff_declare_object_name (FILE *file
, const char *name
, tree decl
)
34395 struct declare_alias_data data
= {file
, false};
34396 RS6000_OUTPUT_BASENAME (file
, name
);
34397 fputs (":\n", file
);
34398 symtab_node::get_create (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
/* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'.  */
void
rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-$", file);
}
34412 /* Output a symbol offset relative to the dbase for the current object.
34413 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34416 __gcc_unwind_dbase is embedded in all executables/libraries through
34417 libgcc/config/rs6000/crtdbase.S. */
void
rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-__gcc_unwind_dbase", file);
}
34429 rs6000_xcoff_encode_section_info (tree decl
, rtx rtl
, int first
)
34433 const char *symname
;
34435 default_encode_section_info (decl
, rtl
, first
);
34437 /* Careful not to prod global register variables. */
34440 symbol
= XEXP (rtl
, 0);
34441 if (GET_CODE (symbol
) != SYMBOL_REF
)
34444 flags
= SYMBOL_REF_FLAGS (symbol
);
34446 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
34447 flags
&= ~SYMBOL_FLAG_HAS_BLOCK_INFO
;
34449 SYMBOL_REF_FLAGS (symbol
) = flags
;
34451 /* Append mapping class to extern decls. */
34452 symname
= XSTR (symbol
, 0);
34453 if (decl
/* sync condition with assemble_external () */
34454 && DECL_P (decl
) && DECL_EXTERNAL (decl
) && TREE_PUBLIC (decl
)
34455 && ((TREE_CODE (decl
) == VAR_DECL
&& !DECL_THREAD_LOCAL_P (decl
))
34456 || TREE_CODE (decl
) == FUNCTION_DECL
)
34457 && symname
[strlen (symname
) - 1] != ']')
34459 char *newname
= (char *) alloca (strlen (symname
) + 5);
34460 strcpy (newname
, symname
);
34461 strcat (newname
, (TREE_CODE (decl
) == FUNCTION_DECL
34462 ? "[DS]" : "[UA]"));
34463 XSTR (symbol
, 0) = ggc_strdup (newname
);
34466 #endif /* HAVE_AS_TLS */
34467 #endif /* TARGET_XCOFF */
34470 rs6000_asm_weaken_decl (FILE *stream
, tree decl
,
34471 const char *name
, const char *val
)
34473 fputs ("\t.weak\t", stream
);
34474 RS6000_OUTPUT_BASENAME (stream
, name
);
34475 if (decl
&& TREE_CODE (decl
) == FUNCTION_DECL
34476 && DEFAULT_ABI
== ABI_AIX
&& DOT_SYMBOLS
)
34479 fputs ("[DS]", stream
);
34480 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34482 fputs (rs6000_xcoff_visibility (decl
), stream
);
34484 fputs ("\n\t.weak\t.", stream
);
34485 RS6000_OUTPUT_BASENAME (stream
, name
);
34487 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34489 fputs (rs6000_xcoff_visibility (decl
), stream
);
34491 fputc ('\n', stream
);
34494 #ifdef ASM_OUTPUT_DEF
34495 ASM_OUTPUT_DEF (stream
, name
, val
);
34497 if (decl
&& TREE_CODE (decl
) == FUNCTION_DECL
34498 && DEFAULT_ABI
== ABI_AIX
&& DOT_SYMBOLS
)
34500 fputs ("\t.set\t.", stream
);
34501 RS6000_OUTPUT_BASENAME (stream
, name
);
34502 fputs (",.", stream
);
34503 RS6000_OUTPUT_BASENAME (stream
, val
);
34504 fputc ('\n', stream
);
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
         && get_attr_cannot_copy (insn);
}
34519 /* Compute a (partial) cost for rtx X. Return true if the complete
34520 cost has been computed, and false if subexpressions should be
34521 scanned. In either case, *TOTAL contains the cost result. */
34524 rs6000_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
34525 int opno ATTRIBUTE_UNUSED
, int *total
, bool speed
)
34527 int code
= GET_CODE (x
);
34531 /* On the RS/6000, if it is valid in the insn, it is free. */
34533 if (((outer_code
== SET
34534 || outer_code
== PLUS
34535 || outer_code
== MINUS
)
34536 && (satisfies_constraint_I (x
)
34537 || satisfies_constraint_L (x
)))
34538 || (outer_code
== AND
34539 && (satisfies_constraint_K (x
)
34541 ? satisfies_constraint_L (x
)
34542 : satisfies_constraint_J (x
))))
34543 || ((outer_code
== IOR
|| outer_code
== XOR
)
34544 && (satisfies_constraint_K (x
)
34546 ? satisfies_constraint_L (x
)
34547 : satisfies_constraint_J (x
))))
34548 || outer_code
== ASHIFT
34549 || outer_code
== ASHIFTRT
34550 || outer_code
== LSHIFTRT
34551 || outer_code
== ROTATE
34552 || outer_code
== ROTATERT
34553 || outer_code
== ZERO_EXTRACT
34554 || (outer_code
== MULT
34555 && satisfies_constraint_I (x
))
34556 || ((outer_code
== DIV
|| outer_code
== UDIV
34557 || outer_code
== MOD
|| outer_code
== UMOD
)
34558 && exact_log2 (INTVAL (x
)) >= 0)
34559 || (outer_code
== COMPARE
34560 && (satisfies_constraint_I (x
)
34561 || satisfies_constraint_K (x
)))
34562 || ((outer_code
== EQ
|| outer_code
== NE
)
34563 && (satisfies_constraint_I (x
)
34564 || satisfies_constraint_K (x
)
34566 ? satisfies_constraint_L (x
)
34567 : satisfies_constraint_J (x
))))
34568 || (outer_code
== GTU
34569 && satisfies_constraint_I (x
))
34570 || (outer_code
== LTU
34571 && satisfies_constraint_P (x
)))
34576 else if ((outer_code
== PLUS
34577 && reg_or_add_cint_operand (x
, VOIDmode
))
34578 || (outer_code
== MINUS
34579 && reg_or_sub_cint_operand (x
, VOIDmode
))
34580 || ((outer_code
== SET
34581 || outer_code
== IOR
34582 || outer_code
== XOR
)
34584 & ~ (unsigned HOST_WIDE_INT
) 0xffffffff) == 0))
34586 *total
= COSTS_N_INSNS (1);
34592 case CONST_WIDE_INT
:
34596 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34600 /* When optimizing for size, MEM should be slightly more expensive
34601 than generating address, e.g., (plus (reg) (const)).
34602 L1 cache latency is about two instructions. */
34603 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34604 if (rs6000_slow_unaligned_access (mode
, MEM_ALIGN (x
)))
34605 *total
+= COSTS_N_INSNS (100);
34614 if (FLOAT_MODE_P (mode
))
34615 *total
= rs6000_cost
->fp
;
34617 *total
= COSTS_N_INSNS (1);
34621 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34622 && satisfies_constraint_I (XEXP (x
, 1)))
34624 if (INTVAL (XEXP (x
, 1)) >= -256
34625 && INTVAL (XEXP (x
, 1)) <= 255)
34626 *total
= rs6000_cost
->mulsi_const9
;
34628 *total
= rs6000_cost
->mulsi_const
;
34630 else if (mode
== SFmode
)
34631 *total
= rs6000_cost
->fp
;
34632 else if (FLOAT_MODE_P (mode
))
34633 *total
= rs6000_cost
->dmul
;
34634 else if (mode
== DImode
)
34635 *total
= rs6000_cost
->muldi
;
34637 *total
= rs6000_cost
->mulsi
;
34641 if (mode
== SFmode
)
34642 *total
= rs6000_cost
->fp
;
34644 *total
= rs6000_cost
->dmul
;
34649 if (FLOAT_MODE_P (mode
))
34651 *total
= mode
== DFmode
? rs6000_cost
->ddiv
34652 : rs6000_cost
->sdiv
;
34659 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34660 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
34662 if (code
== DIV
|| code
== MOD
)
34664 *total
= COSTS_N_INSNS (2);
34667 *total
= COSTS_N_INSNS (1);
34671 if (GET_MODE (XEXP (x
, 1)) == DImode
)
34672 *total
= rs6000_cost
->divdi
;
34674 *total
= rs6000_cost
->divsi
;
34676 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34677 if (!TARGET_MODULO
&& (code
== MOD
|| code
== UMOD
))
34678 *total
+= COSTS_N_INSNS (2);
34682 *total
= COSTS_N_INSNS (TARGET_CTZ
? 1 : 4);
34686 *total
= COSTS_N_INSNS (4);
34690 *total
= COSTS_N_INSNS (TARGET_POPCNTD
? 1 : 6);
34694 *total
= COSTS_N_INSNS (TARGET_CMPB
? 2 : 6);
34698 if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
34701 *total
= COSTS_N_INSNS (1);
34705 if (CONST_INT_P (XEXP (x
, 1)))
34707 rtx left
= XEXP (x
, 0);
34708 rtx_code left_code
= GET_CODE (left
);
34710 /* rotate-and-mask: 1 insn. */
34711 if ((left_code
== ROTATE
34712 || left_code
== ASHIFT
34713 || left_code
== LSHIFTRT
)
34714 && rs6000_is_valid_shift_mask (XEXP (x
, 1), left
, mode
))
34716 *total
= rtx_cost (XEXP (left
, 0), mode
, left_code
, 0, speed
);
34717 if (!CONST_INT_P (XEXP (left
, 1)))
34718 *total
+= rtx_cost (XEXP (left
, 1), SImode
, left_code
, 1, speed
);
34719 *total
+= COSTS_N_INSNS (1);
34723 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34724 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
34725 if (rs6000_is_valid_and_mask (XEXP (x
, 1), mode
)
34726 || (val
& 0xffff) == val
34727 || (val
& 0xffff0000) == val
34728 || ((val
& 0xffff) == 0 && mode
== SImode
))
34730 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34731 *total
+= COSTS_N_INSNS (1);
34736 if (rs6000_is_valid_2insn_and (XEXP (x
, 1), mode
))
34738 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34739 *total
+= COSTS_N_INSNS (2);
34744 *total
= COSTS_N_INSNS (1);
34749 *total
= COSTS_N_INSNS (1);
34755 *total
= COSTS_N_INSNS (1);
34759 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34760 the sign extend and shift separately within the insn. */
34761 if (TARGET_EXTSWSLI
&& mode
== DImode
34762 && GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
34763 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == SImode
)
34774 /* Handle mul_highpart. */
34775 if (outer_code
== TRUNCATE
34776 && GET_CODE (XEXP (x
, 0)) == MULT
)
34778 if (mode
== DImode
)
34779 *total
= rs6000_cost
->muldi
;
34781 *total
= rs6000_cost
->mulsi
;
34784 else if (outer_code
== AND
)
34787 *total
= COSTS_N_INSNS (1);
34792 if (GET_CODE (XEXP (x
, 0)) == MEM
)
34795 *total
= COSTS_N_INSNS (1);
34801 if (!FLOAT_MODE_P (mode
))
34803 *total
= COSTS_N_INSNS (1);
34809 case UNSIGNED_FLOAT
:
34812 case FLOAT_TRUNCATE
:
34813 *total
= rs6000_cost
->fp
;
34817 if (mode
== DFmode
)
34818 *total
= rs6000_cost
->sfdf_convert
;
34820 *total
= rs6000_cost
->fp
;
34824 switch (XINT (x
, 1))
34827 *total
= rs6000_cost
->fp
;
34839 *total
= COSTS_N_INSNS (1);
34842 else if (FLOAT_MODE_P (mode
) && TARGET_PPC_GFXOPT
&& TARGET_HARD_FLOAT
)
34844 *total
= rs6000_cost
->fp
;
34853 /* Carry bit requires mode == Pmode.
34854 NEG or PLUS already counted so only add one. */
34856 && (outer_code
== NEG
|| outer_code
== PLUS
))
34858 *total
= COSTS_N_INSNS (1);
34861 if (outer_code
== SET
)
34863 if (XEXP (x
, 1) == const0_rtx
)
34865 if (TARGET_ISEL
&& !TARGET_MFCRF
)
34866 *total
= COSTS_N_INSNS (8);
34868 *total
= COSTS_N_INSNS (2);
34873 *total
= COSTS_N_INSNS (3);
34882 if (outer_code
== SET
&& (XEXP (x
, 1) == const0_rtx
))
34884 if (TARGET_ISEL
&& !TARGET_MFCRF
)
34885 *total
= COSTS_N_INSNS (8);
34887 *total
= COSTS_N_INSNS (2);
34891 if (outer_code
== COMPARE
)
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
34908 rs6000_debug_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
34909 int opno
, int *total
, bool speed
)
34911 bool ret
= rs6000_rtx_costs (x
, mode
, outer_code
, opno
, total
, speed
);
34914 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34915 "opno = %d, total = %d, speed = %s, x:\n",
34916 ret
? "complete" : "scan inner",
34917 GET_MODE_NAME (mode
),
34918 GET_RTX_NAME (outer_code
),
34921 speed
? "true" : "false");
34929 rs6000_insn_cost (rtx_insn
*insn
, bool speed
)
34931 if (recog_memoized (insn
) < 0)
34935 return get_attr_length (insn
);
34937 int cost
= get_attr_cost (insn
);
34941 int n
= get_attr_length (insn
) / 4;
34942 enum attr_type type
= get_attr_type (insn
);
34949 cost
= COSTS_N_INSNS (n
+ 1);
34953 switch (get_attr_size (insn
))
34956 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->mulsi_const9
;
34959 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->mulsi_const
;
34962 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->mulsi
;
34965 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->muldi
;
34968 gcc_unreachable ();
34972 switch (get_attr_size (insn
))
34975 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->divsi
;
34978 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->divdi
;
34981 gcc_unreachable ();
34986 cost
= n
* rs6000_cost
->fp
;
34989 cost
= n
* rs6000_cost
->dmul
;
34992 cost
= n
* rs6000_cost
->sdiv
;
34995 cost
= n
* rs6000_cost
->ddiv
;
35000 cost
= COSTS_N_INSNS (n
+ 2);
35004 cost
= COSTS_N_INSNS (n
);
35010 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
35013 rs6000_debug_address_cost (rtx x
, machine_mode mode
,
35014 addr_space_t as
, bool speed
)
35016 int ret
= TARGET_ADDRESS_COST (x
, mode
, as
, speed
);
35018 fprintf (stderr
, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
35019 ret
, speed
? "true" : "false");
35026 /* A C expression returning the cost of moving data from a register of class
35027 CLASS1 to one of CLASS2. */
35030 rs6000_register_move_cost (machine_mode mode
,
35031 reg_class_t from
, reg_class_t to
)
35035 if (TARGET_DEBUG_COST
)
35038 /* Moves from/to GENERAL_REGS. */
35039 if (reg_classes_intersect_p (to
, GENERAL_REGS
)
35040 || reg_classes_intersect_p (from
, GENERAL_REGS
))
35042 reg_class_t rclass
= from
;
35044 if (! reg_classes_intersect_p (to
, GENERAL_REGS
))
35047 if (rclass
== FLOAT_REGS
|| rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
35048 ret
= (rs6000_memory_move_cost (mode
, rclass
, false)
35049 + rs6000_memory_move_cost (mode
, GENERAL_REGS
, false));
35051 /* It's more expensive to move CR_REGS than CR0_REGS because of the
35053 else if (rclass
== CR_REGS
)
/* For those processors that have slow LR/CTR moves, make them more
   expensive than memory in order to bias spills to memory.  */
35058 else if ((rs6000_cpu
== PROCESSOR_POWER6
35059 || rs6000_cpu
== PROCESSOR_POWER7
35060 || rs6000_cpu
== PROCESSOR_POWER8
35061 || rs6000_cpu
== PROCESSOR_POWER9
)
35062 && reg_classes_intersect_p (rclass
, LINK_OR_CTR_REGS
))
35063 ret
= 6 * hard_regno_nregs (0, mode
);
35066 /* A move will cost one instruction per GPR moved. */
35067 ret
= 2 * hard_regno_nregs (0, mode
);
35070 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35071 else if (VECTOR_MEM_VSX_P (mode
)
35072 && reg_classes_intersect_p (to
, VSX_REGS
)
35073 && reg_classes_intersect_p (from
, VSX_REGS
))
35074 ret
= 2 * hard_regno_nregs (FIRST_FPR_REGNO
, mode
);
35076 /* Moving between two similar registers is just one instruction. */
35077 else if (reg_classes_intersect_p (to
, from
))
35078 ret
= (FLOAT128_2REG_P (mode
)) ? 4 : 2;
35080 /* Everything else has to go through GENERAL_REGS. */
35082 ret
= (rs6000_register_move_cost (mode
, GENERAL_REGS
, to
)
35083 + rs6000_register_move_cost (mode
, from
, GENERAL_REGS
));
35085 if (TARGET_DEBUG_COST
)
35087 if (dbg_cost_ctrl
== 1)
35089 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35090 ret
, GET_MODE_NAME (mode
), reg_class_names
[from
],
35091 reg_class_names
[to
]);
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */
35102 rs6000_memory_move_cost (machine_mode mode
, reg_class_t rclass
,
35103 bool in ATTRIBUTE_UNUSED
)
35107 if (TARGET_DEBUG_COST
)
35110 if (reg_classes_intersect_p (rclass
, GENERAL_REGS
))
35111 ret
= 4 * hard_regno_nregs (0, mode
);
35112 else if ((reg_classes_intersect_p (rclass
, FLOAT_REGS
)
35113 || reg_classes_intersect_p (rclass
, VSX_REGS
)))
35114 ret
= 4 * hard_regno_nregs (32, mode
);
35115 else if (reg_classes_intersect_p (rclass
, ALTIVEC_REGS
))
35116 ret
= 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO
, mode
);
35118 ret
= 4 + rs6000_register_move_cost (mode
, rclass
, GENERAL_REGS
);
35120 if (TARGET_DEBUG_COST
)
35122 if (dbg_cost_ctrl
== 1)
35124 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35125 ret
, GET_MODE_NAME (mode
), reg_class_names
[rclass
], in
);
35132 /* Returns a code for a target-specific builtin that implements
35133 reciprocal of the function, or NULL_TREE if not available. */
35136 rs6000_builtin_reciprocal (tree fndecl
)
35138 switch (DECL_FUNCTION_CODE (fndecl
))
35140 case VSX_BUILTIN_XVSQRTDP
:
35141 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode
))
35144 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
35146 case VSX_BUILTIN_XVSQRTSP
:
35147 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode
))
35150 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_4SF
];
35157 /* Load up a constant. If the mode is a vector mode, splat the value across
35158 all of the vector elements. */
35161 rs6000_load_constant_and_splat (machine_mode mode
, REAL_VALUE_TYPE dconst
)
35165 if (mode
== SFmode
|| mode
== DFmode
)
35167 rtx d
= const_double_from_real_value (dconst
, mode
);
35168 reg
= force_reg (mode
, d
);
35170 else if (mode
== V4SFmode
)
35172 rtx d
= const_double_from_real_value (dconst
, SFmode
);
35173 rtvec v
= gen_rtvec (4, d
, d
, d
, d
);
35174 reg
= gen_reg_rtx (mode
);
35175 rs6000_expand_vector_init (reg
, gen_rtx_PARALLEL (mode
, v
));
35177 else if (mode
== V2DFmode
)
35179 rtx d
= const_double_from_real_value (dconst
, DFmode
);
35180 rtvec v
= gen_rtvec (2, d
, d
);
35181 reg
= gen_reg_rtx (mode
);
35182 rs6000_expand_vector_init (reg
, gen_rtx_PARALLEL (mode
, v
));
35185 gcc_unreachable ();
35190 /* Generate an FMA instruction. */
35193 rs6000_emit_madd (rtx target
, rtx m1
, rtx m2
, rtx a
)
35195 machine_mode mode
= GET_MODE (target
);
35198 dst
= expand_ternary_op (mode
, fma_optab
, m1
, m2
, a
, target
, 0);
35199 gcc_assert (dst
!= NULL
);
35202 emit_move_insn (target
, dst
);
35205 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35208 rs6000_emit_nmsub (rtx dst
, rtx m1
, rtx m2
, rtx a
)
35210 machine_mode mode
= GET_MODE (dst
);
35213 /* This is a tad more complicated, since the fnma_optab is for
35214 a different expression: fma(-m1, m2, a), which is the same
35215 thing except in the case of signed zeros.
35217 Fortunately we know that if FMA is supported that FNMSUB is
35218 also supported in the ISA. Just expand it directly. */
35220 gcc_assert (optab_handler (fma_optab
, mode
) != CODE_FOR_nothing
);
35222 r
= gen_rtx_NEG (mode
, a
);
35223 r
= gen_rtx_FMA (mode
, m1
, m2
, r
);
35224 r
= gen_rtx_NEG (mode
, r
);
35225 emit_insn (gen_rtx_SET (dst
, r
));
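/* An illustrative, standalone check (not part of GCC) of the identity the
   expander above relies on: FNMSUB computes -fma (m1, m2, -a), which equals
   a - m1*m2 and differs from fma (-m1, m2, a) only for signed zeros.  The
   helper name below is hypothetical.  */
#if 0
static double
nmsub_sketch (double m1, double m2, double a)
{
  /* dst = -fma (m1, m2, -a), exactly the RTL built above.  */
  return -__builtin_fma (m1, m2, -a);
}
#endif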
35228 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35229 add a reg_note saying that this was a division. Support both scalar and
35230 vector divide. Assumes no trapping math and finite arguments. */
35233 rs6000_emit_swdiv (rtx dst
, rtx n
, rtx d
, bool note_p
)
35235 machine_mode mode
= GET_MODE (dst
);
35236 rtx one
, x0
, e0
, x1
, xprev
, eprev
, xnext
, enext
, u
, v
;
35239 /* Low precision estimates guarantee 5 bits of accuracy. High
35240 precision estimates guarantee 14 bits of accuracy. SFmode
35241 requires 23 bits of accuracy. DFmode requires 52 bits of
35242 accuracy. Each pass at least doubles the accuracy, leading
35243 to the following. */
35244 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
35245 if (mode
== DFmode
|| mode
== V2DFmode
)
35248 enum insn_code code
= optab_handler (smul_optab
, mode
);
35249 insn_gen_fn gen_mul
= GEN_FCN (code
);
35251 gcc_assert (code
!= CODE_FOR_nothing
);
35253 one
= rs6000_load_constant_and_splat (mode
, dconst1
);
35255 /* x0 = 1./d estimate */
35256 x0
= gen_reg_rtx (mode
);
35257 emit_insn (gen_rtx_SET (x0
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, d
),
35260 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35263 /* e0 = 1. - d * x0 */
35264 e0
= gen_reg_rtx (mode
);
35265 rs6000_emit_nmsub (e0
, d
, x0
, one
);
35267 /* x1 = x0 + e0 * x0 */
35268 x1
= gen_reg_rtx (mode
);
35269 rs6000_emit_madd (x1
, e0
, x0
, x0
);
35271 for (i
= 0, xprev
= x1
, eprev
= e0
; i
< passes
- 2;
35272 ++i
, xprev
= xnext
, eprev
= enext
) {
35274 /* enext = eprev * eprev */
35275 enext
= gen_reg_rtx (mode
);
35276 emit_insn (gen_mul (enext
, eprev
, eprev
));
35278 /* xnext = xprev + enext * xprev */
35279 xnext
= gen_reg_rtx (mode
);
35280 rs6000_emit_madd (xnext
, enext
, xprev
, xprev
);
35286 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35288 /* u = n * xprev */
35289 u
= gen_reg_rtx (mode
);
35290 emit_insn (gen_mul (u
, n
, xprev
));
35292 /* v = n - (d * u) */
35293 v
= gen_reg_rtx (mode
);
35294 rs6000_emit_nmsub (v
, d
, u
, n
);
35296 /* dst = (v * xprev) + u */
35297 rs6000_emit_madd (dst
, v
, xprev
, u
);
35300 add_reg_note (get_last_insn (), REG_EQUAL
, gen_rtx_DIV (mode
, n
, d
));
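/* A minimal, standalone sketch (not part of GCC) of the refinement sequence
   rs6000_emit_swdiv expands, written with ordinary C doubles.  The helper
   name is hypothetical, and the exact reciprocal below stands in for the
   fre/fres hardware estimate; the real expander also skips the inner
   refinements when a single pass already gives enough precision.  */
#if 0
static double
swdiv_sketch (double n, double d, int passes)
{
  double x = 1.0 / d;          /* x0 = reciprocal estimate of d           */
  double e = 1.0 - d * x;      /* e0 = 1 - d*x0                  (fnmsub) */
  x = x + e * x;               /* x1 = x0 + e0*x0                (fmadd)  */
  for (int i = 0; i < passes - 2; i++)
    {
      e = e * e;               /* e_{i+1} = e_i * e_i                     */
      x = x + e * x;           /* x_{i+1} = x_i + e_{i+1}*x_i             */
    }
  double u = n * x;            /* u = n * x                               */
  double v = n - d * u;        /* v = n - d*u                    (fnmsub) */
  return v * x + u;            /* n/d ~= v*x + u                 (fmadd)  */
}
#endif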
35303 /* Goldschmidt's Algorithm for single/double-precision floating point
35304 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35307 rs6000_emit_swsqrt (rtx dst
, rtx src
, bool recip
)
35309 machine_mode mode
= GET_MODE (src
);
35310 rtx e
= gen_reg_rtx (mode
);
35311 rtx g
= gen_reg_rtx (mode
);
35312 rtx h
= gen_reg_rtx (mode
);
35314 /* Low precision estimates guarantee 5 bits of accuracy. High
35315 precision estimates guarantee 14 bits of accuracy. SFmode
35316 requires 23 bits of accuracy. DFmode requires 52 bits of
35317 accuracy. Each pass at least doubles the accuracy, leading
35318 to the following. */
35319 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
35320 if (mode
== DFmode
|| mode
== V2DFmode
)
35325 enum insn_code code
= optab_handler (smul_optab
, mode
);
35326 insn_gen_fn gen_mul
= GEN_FCN (code
);
35328 gcc_assert (code
!= CODE_FOR_nothing
);
35330 mhalf
= rs6000_load_constant_and_splat (mode
, dconsthalf
);
35332 /* e = rsqrt estimate */
35333 emit_insn (gen_rtx_SET (e
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, src
),
35336 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35339 rtx zero
= force_reg (mode
, CONST0_RTX (mode
));
35341 if (mode
== SFmode
)
35343 rtx target
= emit_conditional_move (e
, GT
, src
, zero
, mode
,
35346 emit_move_insn (e
, target
);
35350 rtx cond
= gen_rtx_GT (VOIDmode
, e
, zero
);
35351 rs6000_emit_vector_cond_expr (e
, e
, zero
, cond
, src
, zero
);
35355 /* g = sqrt estimate. */
35356 emit_insn (gen_mul (g
, e
, src
));
35357 /* h = 1/(2*sqrt) estimate. */
35358 emit_insn (gen_mul (h
, e
, mhalf
));
35364 rtx t
= gen_reg_rtx (mode
);
35365 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
35366 /* Apply correction directly to 1/rsqrt estimate. */
35367 rs6000_emit_madd (dst
, e
, t
, e
);
35371 for (i
= 0; i
< passes
; i
++)
35373 rtx t1
= gen_reg_rtx (mode
);
35374 rtx g1
= gen_reg_rtx (mode
);
35375 rtx h1
= gen_reg_rtx (mode
);
35377 rs6000_emit_nmsub (t1
, g
, h
, mhalf
);
35378 rs6000_emit_madd (g1
, g
, t1
, g
);
35379 rs6000_emit_madd (h1
, h
, t1
, h
);
35384 /* Multiply by 2 for 1/rsqrt. */
35385 emit_insn (gen_add3_insn (dst
, h
, h
));
35390 rtx t
= gen_reg_rtx (mode
);
35391 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
35392 rs6000_emit_madd (dst
, g
, t
, g
);
35398 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35399 (Power7) targets. DST is the target, and SRC is the argument operand. */
35402 rs6000_emit_popcount (rtx dst
, rtx src
)
35404 machine_mode mode
= GET_MODE (dst
);
35407 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35408 if (TARGET_POPCNTD
)
35410 if (mode
== SImode
)
35411 emit_insn (gen_popcntdsi2 (dst
, src
));
35413 emit_insn (gen_popcntddi2 (dst
, src
));
35417 tmp1
= gen_reg_rtx (mode
);
35419 if (mode
== SImode
)
35421 emit_insn (gen_popcntbsi2 (tmp1
, src
));
35422 tmp2
= expand_mult (SImode
, tmp1
, GEN_INT (0x01010101),
35424 tmp2
= force_reg (SImode
, tmp2
);
35425 emit_insn (gen_lshrsi3 (dst
, tmp2
, GEN_INT (24)));
35429 emit_insn (gen_popcntbdi2 (tmp1
, src
));
35430 tmp2
= expand_mult (DImode
, tmp1
,
35431 GEN_INT ((HOST_WIDE_INT
)
35432 0x01010101 << 32 | 0x01010101),
35434 tmp2
= force_reg (DImode
, tmp2
);
35435 emit_insn (gen_lshrdi3 (dst
, tmp2
, GEN_INT (56)));
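/* An illustrative, standalone sketch (not part of GCC) of the popcntb
   fallback above: with a per-byte population count in each byte of a word,
   multiplying by 0x01010101 accumulates the byte counts into the most
   significant byte, which the final right shift extracts.  The helper name
   is hypothetical and the portable per-byte count stands in for popcntb.  */
#if 0
static unsigned int
popcount32_sketch (unsigned int x)
{
  unsigned int bytes = 0;
  for (int i = 0; i < 4; i++)          /* stand-in for the popcntb insn */
    bytes |= (unsigned int) __builtin_popcount ((x >> (8 * i)) & 0xff)
             << (8 * i);
  /* The sum of the four byte counts lands in bits 31:24.  */
  return (bytes * 0x01010101u) >> 24;
}
#endif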
35440 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35441 target, and SRC is the argument operand. */
35444 rs6000_emit_parity (rtx dst
, rtx src
)
35446 machine_mode mode
= GET_MODE (dst
);
35449 tmp
= gen_reg_rtx (mode
);
35451 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35454 if (mode
== SImode
)
35456 emit_insn (gen_popcntbsi2 (tmp
, src
));
35457 emit_insn (gen_paritysi2_cmpb (dst
, tmp
));
35461 emit_insn (gen_popcntbdi2 (tmp
, src
));
35462 emit_insn (gen_paritydi2_cmpb (dst
, tmp
));
35467 if (mode
== SImode
)
35469 /* Is mult+shift >= shift+xor+shift+xor? */
35470 if (rs6000_cost
->mulsi_const
>= COSTS_N_INSNS (3))
35472 rtx tmp1
, tmp2
, tmp3
, tmp4
;
35474 tmp1
= gen_reg_rtx (SImode
);
35475 emit_insn (gen_popcntbsi2 (tmp1
, src
));
35477 tmp2
= gen_reg_rtx (SImode
);
35478 emit_insn (gen_lshrsi3 (tmp2
, tmp1
, GEN_INT (16)));
35479 tmp3
= gen_reg_rtx (SImode
);
35480 emit_insn (gen_xorsi3 (tmp3
, tmp1
, tmp2
));
35482 tmp4
= gen_reg_rtx (SImode
);
35483 emit_insn (gen_lshrsi3 (tmp4
, tmp3
, GEN_INT (8)));
35484 emit_insn (gen_xorsi3 (tmp
, tmp3
, tmp4
));
35487 rs6000_emit_popcount (tmp
, src
);
35488 emit_insn (gen_andsi3 (dst
, tmp
, const1_rtx
));
35492 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35493 if (rs6000_cost
->muldi
>= COSTS_N_INSNS (5))
35495 rtx tmp1
, tmp2
, tmp3
, tmp4
, tmp5
, tmp6
;
35497 tmp1
= gen_reg_rtx (DImode
);
35498 emit_insn (gen_popcntbdi2 (tmp1
, src
));
35500 tmp2
= gen_reg_rtx (DImode
);
35501 emit_insn (gen_lshrdi3 (tmp2
, tmp1
, GEN_INT (32)));
35502 tmp3
= gen_reg_rtx (DImode
);
35503 emit_insn (gen_xordi3 (tmp3
, tmp1
, tmp2
));
35505 tmp4
= gen_reg_rtx (DImode
);
35506 emit_insn (gen_lshrdi3 (tmp4
, tmp3
, GEN_INT (16)));
35507 tmp5
= gen_reg_rtx (DImode
);
35508 emit_insn (gen_xordi3 (tmp5
, tmp3
, tmp4
));
35510 tmp6
= gen_reg_rtx (DImode
);
35511 emit_insn (gen_lshrdi3 (tmp6
, tmp5
, GEN_INT (8)));
35512 emit_insn (gen_xordi3 (tmp
, tmp5
, tmp6
));
35515 rs6000_emit_popcount (tmp
, src
);
35516 emit_insn (gen_anddi3 (dst
, tmp
, const1_rtx
));
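/* An illustrative, standalone sketch (not part of GCC) of the shift/xor
   fallback above for 32-bit parity: starting from per-byte population
   counts, xor-folding the halfwords and then the bytes leaves the parity
   of the whole word in bit 0.  The helper name is hypothetical.  */
#if 0
static unsigned int
parity32_sketch (unsigned int x)
{
  unsigned int t = 0;
  for (int i = 0; i < 4; i++)          /* stand-in for the popcntb insn */
    t |= (unsigned int) __builtin_popcount ((x >> (8 * i)) & 0xff)
         << (8 * i);
  t ^= t >> 16;                        /* fold the two halfwords */
  t ^= t >> 8;                         /* fold the two bytes     */
  return t & 1;                        /* parity of X            */
}
#endif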
/* Expand an Altivec constant permutation for little endian mode.
   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode:  it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

     vperm 9,10,11,12

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9  = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to

     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

   and issue

     vperm 9,11,10,12

   we get the desired

     vr9 = 00000006 00000004 00000002 00000000.  */
35563 altivec_expand_vec_perm_const_le (rtx operands
[4])
35567 rtx constv
, unspec
;
35568 rtx target
= operands
[0];
35569 rtx op0
= operands
[1];
35570 rtx op1
= operands
[2];
35571 rtx sel
= operands
[3];
35573 /* Unpack and adjust the constant selector. */
35574 for (i
= 0; i
< 16; ++i
)
35576 rtx e
= XVECEXP (sel
, 0, i
);
35577 unsigned int elt
= 31 - (INTVAL (e
) & 31);
35578 perm
[i
] = GEN_INT (elt
);
35581 /* Expand to a permute, swapping the inputs and using the
35582 adjusted selector. */
35584 op0
= force_reg (V16QImode
, op0
);
35586 op1
= force_reg (V16QImode
, op1
);
35588 constv
= gen_rtx_CONST_VECTOR (V16QImode
, gen_rtvec_v (16, perm
));
35589 constv
= force_reg (V16QImode
, constv
);
35590 unspec
= gen_rtx_UNSPEC (V16QImode
, gen_rtvec (3, op1
, op0
, constv
),
35592 if (!REG_P (target
))
35594 rtx tmp
= gen_reg_rtx (V16QImode
);
35595 emit_move_insn (tmp
, unspec
);
35599 emit_move_insn (target
, unspec
);
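/* An illustrative, standalone sketch (not part of GCC) of the selector
   adjustment performed above: every element of the BE-style permute
   control vector is replaced by 31 minus its value, and the two input
   vectors are swapped when the vperm is emitted.  The helper name is
   hypothetical.  */
#if 0
static void
adjust_vperm_selector_sketch (unsigned char sel[16])
{
  for (int i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);  /* mirror index within the 32-byte pair */
}
#endif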
35602 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35603 permute control vector. But here it's not a constant, so we must
35604 generate a vector NAND or NOR to do the adjustment. */
35607 altivec_expand_vec_perm_le (rtx operands
[4])
35609 rtx notx
, iorx
, unspec
;
35610 rtx target
= operands
[0];
35611 rtx op0
= operands
[1];
35612 rtx op1
= operands
[2];
35613 rtx sel
= operands
[3];
35615 rtx norreg
= gen_reg_rtx (V16QImode
);
35616 machine_mode mode
= GET_MODE (target
);
35618 /* Get everything in regs so the pattern matches. */
35620 op0
= force_reg (mode
, op0
);
35622 op1
= force_reg (mode
, op1
);
35624 sel
= force_reg (V16QImode
, sel
);
35625 if (!REG_P (target
))
35626 tmp
= gen_reg_rtx (mode
);
35628 if (TARGET_P9_VECTOR
)
35630 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op0
, op1
, sel
),
35635 /* Invert the selector with a VNAND if available, else a VNOR.
35636 The VNAND is preferred for future fusion opportunities. */
35637 notx
= gen_rtx_NOT (V16QImode
, sel
);
35638 iorx
= (TARGET_P8_VECTOR
35639 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
35640 : gen_rtx_AND (V16QImode
, notx
, notx
));
35641 emit_insn (gen_rtx_SET (norreg
, iorx
));
35643 /* Permute with operands reversed and adjusted selector. */
35644 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op1
, op0
, norreg
),
35648 /* Copy into target, possibly by way of a register. */
35649 if (!REG_P (target
))
35651 emit_move_insn (tmp
, unspec
);
35655 emit_move_insn (target
, unspec
);
35658 /* Expand an Altivec constant permutation. Return true if we match
35659 an efficient implementation; false to fall back to VPERM. */
35662 altivec_expand_vec_perm_const (rtx operands
[4])
35664 struct altivec_perm_insn
{
35665 HOST_WIDE_INT mask
;
35666 enum insn_code impl
;
35667 unsigned char perm
[16];
35669 static const struct altivec_perm_insn patterns
[] = {
35670 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhum_direct
,
35671 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35672 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwum_direct
,
35673 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35674 { OPTION_MASK_ALTIVEC
,
35675 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghb_direct
35676 : CODE_FOR_altivec_vmrglb_direct
),
35677 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35678 { OPTION_MASK_ALTIVEC
,
35679 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghh_direct
35680 : CODE_FOR_altivec_vmrglh_direct
),
35681 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35682 { OPTION_MASK_ALTIVEC
,
35683 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghw_direct
35684 : CODE_FOR_altivec_vmrglw_direct
),
35685 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35686 { OPTION_MASK_ALTIVEC
,
35687 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglb_direct
35688 : CODE_FOR_altivec_vmrghb_direct
),
35689 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35690 { OPTION_MASK_ALTIVEC
,
35691 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglh_direct
35692 : CODE_FOR_altivec_vmrghh_direct
),
35693 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35694 { OPTION_MASK_ALTIVEC
,
35695 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglw_direct
35696 : CODE_FOR_altivec_vmrghw_direct
),
35697 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35698 { OPTION_MASK_P8_VECTOR
,
35699 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgew_v4sf_direct
35700 : CODE_FOR_p8_vmrgow_v4sf_direct
),
35701 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35702 { OPTION_MASK_P8_VECTOR
,
35703 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgow_v4sf_direct
35704 : CODE_FOR_p8_vmrgew_v4sf_direct
),
35705 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35708 unsigned int i
, j
, elt
, which
;
35709 unsigned char perm
[16];
35710 rtx target
, op0
, op1
, sel
, x
;
35713 target
= operands
[0];
35718 /* Unpack the constant selector. */
35719 for (i
= which
= 0; i
< 16; ++i
)
35721 rtx e
= XVECEXP (sel
, 0, i
);
35722 elt
= INTVAL (e
) & 31;
35723 which
|= (elt
< 16 ? 1 : 2);
35727 /* Simplify the constant selector based on operands. */
35731 gcc_unreachable ();
35735 if (!rtx_equal_p (op0
, op1
))
35740 for (i
= 0; i
< 16; ++i
)
35752 /* Look for splat patterns. */
35757 for (i
= 0; i
< 16; ++i
)
35758 if (perm
[i
] != elt
)
35762 if (!BYTES_BIG_ENDIAN
)
35764 emit_insn (gen_altivec_vspltb_direct (target
, op0
, GEN_INT (elt
)));
35770 for (i
= 0; i
< 16; i
+= 2)
35771 if (perm
[i
] != elt
|| perm
[i
+ 1] != elt
+ 1)
35775 int field
= BYTES_BIG_ENDIAN
? elt
/ 2 : 7 - elt
/ 2;
35776 x
= gen_reg_rtx (V8HImode
);
35777 emit_insn (gen_altivec_vsplth_direct (x
, gen_lowpart (V8HImode
, op0
),
35779 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35786 for (i
= 0; i
< 16; i
+= 4)
35788 || perm
[i
+ 1] != elt
+ 1
35789 || perm
[i
+ 2] != elt
+ 2
35790 || perm
[i
+ 3] != elt
+ 3)
35794 int field
= BYTES_BIG_ENDIAN
? elt
/ 4 : 3 - elt
/ 4;
35795 x
= gen_reg_rtx (V4SImode
);
35796 emit_insn (gen_altivec_vspltw_direct (x
, gen_lowpart (V4SImode
, op0
),
35798 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35804 /* Look for merge and pack patterns. */
35805 for (j
= 0; j
< ARRAY_SIZE (patterns
); ++j
)
35809 if ((patterns
[j
].mask
& rs6000_isa_flags
) == 0)
35812 elt
= patterns
[j
].perm
[0];
35813 if (perm
[0] == elt
)
35815 else if (perm
[0] == elt
+ 16)
35819 for (i
= 1; i
< 16; ++i
)
35821 elt
= patterns
[j
].perm
[i
];
35823 elt
= (elt
>= 16 ? elt
- 16 : elt
+ 16);
35824 else if (one_vec
&& elt
>= 16)
35826 if (perm
[i
] != elt
)
35831 enum insn_code icode
= patterns
[j
].impl
;
35832 machine_mode omode
= insn_data
[icode
].operand
[0].mode
;
35833 machine_mode imode
= insn_data
[icode
].operand
[1].mode
;
35835 /* For little-endian, don't use vpkuwum and vpkuhum if the
35836 underlying vector type is not V4SI and V8HI, respectively.
35837 For example, using vpkuwum with a V8HI picks up the even
35838 halfwords (BE numbering) when the even halfwords (LE
35839 numbering) are what we need. */
35840 if (!BYTES_BIG_ENDIAN
35841 && icode
== CODE_FOR_altivec_vpkuwum_direct
35842 && ((GET_CODE (op0
) == REG
35843 && GET_MODE (op0
) != V4SImode
)
35844 || (GET_CODE (op0
) == SUBREG
35845 && GET_MODE (XEXP (op0
, 0)) != V4SImode
)))
35847 if (!BYTES_BIG_ENDIAN
35848 && icode
== CODE_FOR_altivec_vpkuhum_direct
35849 && ((GET_CODE (op0
) == REG
35850 && GET_MODE (op0
) != V8HImode
)
35851 || (GET_CODE (op0
) == SUBREG
35852 && GET_MODE (XEXP (op0
, 0)) != V8HImode
)))
35855 /* For little-endian, the two input operands must be swapped
35856 (or swapped back) to ensure proper right-to-left numbering
35858 if (swapped
^ !BYTES_BIG_ENDIAN
)
35859 std::swap (op0
, op1
);
35860 if (imode
!= V16QImode
)
35862 op0
= gen_lowpart (imode
, op0
);
35863 op1
= gen_lowpart (imode
, op1
);
35865 if (omode
== V16QImode
)
35868 x
= gen_reg_rtx (omode
);
35869 emit_insn (GEN_FCN (icode
) (x
, op0
, op1
));
35870 if (omode
!= V16QImode
)
35871 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35876 if (!BYTES_BIG_ENDIAN
)
35878 altivec_expand_vec_perm_const_le (operands
);
35885 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35886 Return true if we match an efficient implementation. */
35889 rs6000_expand_vec_perm_const_1 (rtx target
, rtx op0
, rtx op1
,
35890 unsigned char perm0
, unsigned char perm1
)
35894 /* If both selectors come from the same operand, fold to single op. */
35895 if ((perm0
& 2) == (perm1
& 2))
35902 /* If both operands are equal, fold to simpler permutation. */
35903 if (rtx_equal_p (op0
, op1
))
35906 perm1
= (perm1
& 1) + 2;
35908 /* If the first selector comes from the second operand, swap. */
35909 else if (perm0
& 2)
35915 std::swap (op0
, op1
);
35917 /* If the second selector does not come from the second operand, fail. */
35918 else if ((perm1
& 2) == 0)
35922 if (target
!= NULL
)
35924 machine_mode vmode
, dmode
;
35927 vmode
= GET_MODE (target
);
35928 gcc_assert (GET_MODE_NUNITS (vmode
) == 2);
35929 dmode
= mode_for_vector (GET_MODE_INNER (vmode
), 4).require ();
35930 x
= gen_rtx_VEC_CONCAT (dmode
, op0
, op1
);
35931 v
= gen_rtvec (2, GEN_INT (perm0
), GEN_INT (perm1
));
35932 x
= gen_rtx_VEC_SELECT (vmode
, x
, gen_rtx_PARALLEL (VOIDmode
, v
));
35933 emit_insn (gen_rtx_SET (target
, x
));
35939 rs6000_expand_vec_perm_const (rtx operands
[4])
35941 rtx target
, op0
, op1
, sel
;
35942 unsigned char perm0
, perm1
;
35944 target
= operands
[0];
35949 /* Unpack the constant selector. */
35950 perm0
= INTVAL (XVECEXP (sel
, 0, 0)) & 3;
35951 perm1
= INTVAL (XVECEXP (sel
, 0, 1)) & 3;
35953 return rs6000_expand_vec_perm_const_1 (target
, op0
, op1
, perm0
, perm1
);
35956 /* Test whether a constant permutation is supported. */
35959 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode
, vec_perm_indices sel
)
35961 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35962 if (TARGET_ALTIVEC
)
35965 /* Check for ps_merge* or evmerge* insns. */
35966 if (TARGET_PAIRED_FLOAT
&& vmode
== V2SFmode
)
35968 rtx op0
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 1);
35969 rtx op1
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 2);
35970 return rs6000_expand_vec_perm_const_1 (NULL
, op0
, op1
, sel
[0], sel
[1]);
35976 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35979 rs6000_do_expand_vec_perm (rtx target
, rtx op0
, rtx op1
,
35980 machine_mode vmode
, unsigned nelt
, rtx perm
[])
35982 machine_mode imode
;
35986 if (GET_MODE_CLASS (vmode
) != MODE_VECTOR_INT
)
35987 imode
= mode_for_int_vector (vmode
).require ();
35989 x
= gen_rtx_CONST_VECTOR (imode
, gen_rtvec_v (nelt
, perm
));
35990 x
= expand_vec_perm (vmode
, op0
, op1
, x
, target
);
35992 emit_move_insn (target
, x
);
35995 /* Expand an extract even operation. */
35998 rs6000_expand_extract_even (rtx target
, rtx op0
, rtx op1
)
36000 machine_mode vmode
= GET_MODE (target
);
36001 unsigned i
, nelt
= GET_MODE_NUNITS (vmode
);
36004 for (i
= 0; i
< nelt
; i
++)
36005 perm
[i
] = GEN_INT (i
* 2);
36007 rs6000_do_expand_vec_perm (target
, op0
, op1
, vmode
, nelt
, perm
);
36010 /* Expand a vector interleave operation. */
36013 rs6000_expand_interleave (rtx target
, rtx op0
, rtx op1
, bool highp
)
36015 machine_mode vmode
= GET_MODE (target
);
36016 unsigned i
, high
, nelt
= GET_MODE_NUNITS (vmode
);
36019 high
= (highp
? 0 : nelt
/ 2);
36020 for (i
= 0; i
< nelt
/ 2; i
++)
36022 perm
[i
* 2] = GEN_INT (i
+ high
);
36023 perm
[i
* 2 + 1] = GEN_INT (i
+ nelt
+ high
);
36026 rs6000_do_expand_vec_perm (target
, op0
, op1
, vmode
, nelt
, perm
);
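/* An illustrative, standalone sketch (not part of GCC) of the selector
   built above: an interleave-high of two NELT-element vectors picks
   0, NELT, 1, NELT+1, ..., while interleave-low takes the same pattern
   starting at element NELT/2 of each input.  The helper name is
   hypothetical.  */
#if 0
static void
interleave_selector_sketch (unsigned int nelt, int highp, unsigned int sel[])
{
  unsigned int base = highp ? 0 : nelt / 2;
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      sel[2 * i] = base + i;               /* element from the first input  */
      sel[2 * i + 1] = base + i + nelt;    /* element from the second input */
    }
}
#endif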
36029 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
36031 rs6000_scale_v2df (rtx tgt
, rtx src
, int scale
)
36033 HOST_WIDE_INT
hwi_scale (scale
);
36034 REAL_VALUE_TYPE r_pow
;
36035 rtvec v
= rtvec_alloc (2);
36037 rtx scale_vec
= gen_reg_rtx (V2DFmode
);
36038 (void)real_powi (&r_pow
, DFmode
, &dconst2
, hwi_scale
);
36039 elt
= const_double_from_real_value (r_pow
, DFmode
);
36040 RTVEC_ELT (v
, 0) = elt
;
36041 RTVEC_ELT (v
, 1) = elt
;
36042 rs6000_expand_vector_init (scale_vec
, gen_rtx_PARALLEL (V2DFmode
, v
));
36043 emit_insn (gen_mulv2df3 (tgt
, src
, scale_vec
));
36046 /* Return an RTX representing where to find the function value of a
36047 function returning MODE. */
36049 rs6000_complex_function_value (machine_mode mode
)
36051 unsigned int regno
;
36053 machine_mode inner
= GET_MODE_INNER (mode
);
36054 unsigned int inner_bytes
= GET_MODE_UNIT_SIZE (mode
);
36056 if (TARGET_FLOAT128_TYPE
36058 || (mode
== TCmode
&& TARGET_IEEEQUAD
)))
36059 regno
= ALTIVEC_ARG_RETURN
;
36061 else if (FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
36062 regno
= FP_ARG_RETURN
;
36066 regno
= GP_ARG_RETURN
;
36068 /* 32-bit is OK since it'll go in r3/r4. */
36069 if (TARGET_32BIT
&& inner_bytes
>= 4)
36070 return gen_rtx_REG (mode
, regno
);
36073 if (inner_bytes
>= 8)
36074 return gen_rtx_REG (mode
, regno
);
36076 r1
= gen_rtx_EXPR_LIST (inner
, gen_rtx_REG (inner
, regno
),
36078 r2
= gen_rtx_EXPR_LIST (inner
, gen_rtx_REG (inner
, regno
+ 1),
36079 GEN_INT (inner_bytes
));
36080 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, r1
, r2
));
36083 /* Return an rtx describing a return value of MODE as a PARALLEL
36084 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36085 stride REG_STRIDE. */
36088 rs6000_parallel_return (machine_mode mode
,
36089 int n_elts
, machine_mode elt_mode
,
36090 unsigned int regno
, unsigned int reg_stride
)
36092 rtx par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (n_elts
));
36095 for (i
= 0; i
< n_elts
; i
++)
36097 rtx r
= gen_rtx_REG (elt_mode
, regno
);
36098 rtx off
= GEN_INT (i
* GET_MODE_SIZE (elt_mode
));
36099 XVECEXP (par
, 0, i
) = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
36100 regno
+= reg_stride
;
36106 /* Target hook for TARGET_FUNCTION_VALUE.
36108 An integer value is in r3 and a floating-point value is in fp1,
36109 unless -msoft-float. */
36112 rs6000_function_value (const_tree valtype
,
36113 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
36114 bool outgoing ATTRIBUTE_UNUSED
)
36117 unsigned int regno
;
36118 machine_mode elt_mode
;
36121 /* Special handling for structs in darwin64. */
36123 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype
), valtype
))
36125 CUMULATIVE_ARGS valcum
;
36129 valcum
.fregno
= FP_ARG_MIN_REG
;
36130 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
36131 /* Do a trial code generation as if this were going to be passed as
36132 an argument; if any part goes in memory, we return NULL. */
36133 valret
= rs6000_darwin64_record_arg (&valcum
, valtype
, true, /* retval= */ true);
36136 /* Otherwise fall through to standard ABI rules. */
36139 mode
= TYPE_MODE (valtype
);
36141 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36142 if (rs6000_discover_homogeneous_aggregate (mode
, valtype
, &elt_mode
, &n_elts
))
36144 int first_reg
, n_regs
;
36146 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode
))
36148 /* _Decimal128 must use even/odd register pairs. */
36149 first_reg
= (elt_mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
36150 n_regs
= (GET_MODE_SIZE (elt_mode
) + 7) >> 3;
36154 first_reg
= ALTIVEC_ARG_RETURN
;
36158 return rs6000_parallel_return (mode
, n_elts
, elt_mode
, first_reg
, n_regs
);
/* Some return value types need to be split in -mpowerpc64, 32-bit ABI.  */
36162 if (TARGET_32BIT
&& TARGET_POWERPC64
)
36171 int count
= GET_MODE_SIZE (mode
) / 4;
36172 return rs6000_parallel_return (mode
, count
, SImode
, GP_ARG_RETURN
, 1);
36175 if ((INTEGRAL_TYPE_P (valtype
)
36176 && GET_MODE_BITSIZE (mode
) < (TARGET_32BIT
? 32 : 64))
36177 || POINTER_TYPE_P (valtype
))
36178 mode
= TARGET_32BIT
? SImode
: DImode
;
36180 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
36181 /* _Decimal128 must use an even/odd register pair. */
36182 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
36183 else if (SCALAR_FLOAT_TYPE_P (valtype
) && TARGET_HARD_FLOAT
36184 && !FLOAT128_VECTOR_P (mode
)
36185 && ((TARGET_SINGLE_FLOAT
&& (mode
== SFmode
)) || TARGET_DOUBLE_FLOAT
))
36186 regno
= FP_ARG_RETURN
;
36187 else if (TREE_CODE (valtype
) == COMPLEX_TYPE
36188 && targetm
.calls
.split_complex_arg
)
36189 return rs6000_complex_function_value (mode
);
36190 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36191 return register is used in both cases, and we won't see V2DImode/V2DFmode
36192 for pure altivec, combine the two cases. */
36193 else if ((TREE_CODE (valtype
) == VECTOR_TYPE
|| FLOAT128_VECTOR_P (mode
))
36194 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
36195 && ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
36196 regno
= ALTIVEC_ARG_RETURN
;
36198 regno
= GP_ARG_RETURN
;
36200 return gen_rtx_REG (mode
, regno
);
36203 /* Define how to find the value returned by a library function
36204 assuming the value has mode MODE. */
36206 rs6000_libcall_value (machine_mode mode
)
36208 unsigned int regno
;
/* Long long return values need to be split in -mpowerpc64, 32-bit ABI.  */
36211 if (TARGET_32BIT
&& TARGET_POWERPC64
&& mode
== DImode
)
36212 return rs6000_parallel_return (mode
, 2, SImode
, GP_ARG_RETURN
, 1);
36214 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
36215 /* _Decimal128 must use an even/odd register pair. */
36216 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
36217 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode
)
36218 && TARGET_HARD_FLOAT
36219 && ((TARGET_SINGLE_FLOAT
&& mode
== SFmode
) || TARGET_DOUBLE_FLOAT
))
36220 regno
= FP_ARG_RETURN
;
36221 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36222 return register is used in both cases, and we won't see V2DImode/V2DFmode
36223 for pure altivec, combine the two cases. */
36224 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode
)
36225 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
)
36226 regno
= ALTIVEC_ARG_RETURN
;
36227 else if (COMPLEX_MODE_P (mode
) && targetm
.calls
.split_complex_arg
)
36228 return rs6000_complex_function_value (mode
);
36230 regno
= GP_ARG_RETURN
;
36232 return gen_rtx_REG (mode
, regno
);
/* Compute register pressure classes.  We implement the target hook to avoid
   IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
   lead to incorrect estimates of the number of available registers and
   therefore increased register pressure/spill.  */
36240 rs6000_compute_pressure_classes (enum reg_class
*pressure_classes
)
36245 pressure_classes
[n
++] = GENERAL_REGS
;
36247 pressure_classes
[n
++] = VSX_REGS
;
36250 if (TARGET_ALTIVEC
)
36251 pressure_classes
[n
++] = ALTIVEC_REGS
;
36252 if (TARGET_HARD_FLOAT
)
36253 pressure_classes
[n
++] = FLOAT_REGS
;
36255 pressure_classes
[n
++] = CR_REGS
;
36256 pressure_classes
[n
++] = SPECIAL_REGS
;
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */
36271 rs6000_can_eliminate (const int from
, const int to
)
36273 return (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
36274 ? ! frame_pointer_needed
36275 : from
== RS6000_PIC_OFFSET_TABLE_REGNUM
36276 ? ! TARGET_MINIMAL_TOC
|| TARGET_NO_TOC
36277 || constant_pool_empty_p ()
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
/* Fill in sizes of registers used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_MACHO && ! TARGET_ALTIVEC)
    {
      int i;
      machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
         The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN
                (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
          HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}
/* Map internal gcc register numbers to debug format register numbers.
   FORMAT specifies the type of debug register number to use:
     0 -- debug information, except for frame-related sections
     1 -- DWARF .debug_frame section
     2 -- DWARF .eh_frame section  */

unsigned int
rs6000_dbx_register_number (unsigned int regno, unsigned int format)
{
  /* Except for the above, we use the internal number for non-DWARF
     debug information, and also for .eh_frame.  */
  if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
    return regno;

  /* On some platforms, we use the standard DWARF register
     numbering for .debug_info and .debug_frame.  */
#ifdef RS6000_USE_DWARF_NUMBERING
  if (regno == LR_REGNO)
  if (regno == CTR_REGNO)
  /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
     translated any combination of CR2, CR3, CR4 saves to a save of CR2.
     The actual code emitted saves the whole of CR, so we map CR2_REGNO
     to the DWARF reg for CR.  */
  if (format == 1 && regno == CR2_REGNO)
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
  if (regno == VSCR_REGNO)
#endif
  return regno;
}
/* target hook eh_return_filter_mode */
static scalar_int_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */

static bool
rs6000_scalar_mode_supported_p (scalar_mode mode)
{
  /* -m32 does not support TImode.  This is the default, from
     default_scalar_mode_supported_p.  For -m32 -mpowerpc64 we want the
     same ABI as for -m32.  But default_scalar_mode_supported_p allows
     integer modes of precision 2 * BITS_PER_WORD, which matches TImode
     for -mpowerpc64.  */
  if (TARGET_32BIT && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */

static bool
rs6000_vector_mode_supported_p (machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  /* There is no vector form for IEEE 128-bit.  If we return true for IEEE
     128-bit, the compiler might try to widen IEEE 128-bit to IBM
     double-double.  */
  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
    return true;

  else
    return false;
}
/* Target hook for floatn_mode.  */

static opt_scalar_float_mode
rs6000_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      switch (n)
        {
        case 32:
          return DFmode;

        case 64:
          if (TARGET_FLOAT128_TYPE)
            return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
          else
            return opt_scalar_float_mode ();

        case 128:
          return opt_scalar_float_mode ();

        default:
          /* Those are the only valid _FloatNx types.  */
          gcc_unreachable ();
        }
    }
  else
    {
      switch (n)
        {
        case 32:
          return SFmode;

        case 64:
          return DFmode;

        case 128:
          if (TARGET_FLOAT128_TYPE)
            return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
          else
            return opt_scalar_float_mode ();

        default:
          return opt_scalar_float_mode ();
        }
    }
}
/* Target hook for c_mode_for_suffix.  */

static machine_mode
rs6000_c_mode_for_suffix (char suffix)
{
  if (TARGET_FLOAT128_TYPE)
    {
      if (suffix == 'q' || suffix == 'Q')
        return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;

      /* At the moment, we are not defining a suffix for IBM extended double.
         If/when the default for -mabi=ieeelongdouble is changed, and we want
         to support __ibm128 constants in legacy library code, we may need to
         re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
         'q' as machine dependent suffixes.  The x86_64 port uses 'w' for
         __float80 constants.  */
    }

  return VOIDmode;
}
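/* Illustrative user-level effect of the 'q' suffix wired up above (assuming a
   target where the __float128 keyword is enabled):

     __float128 ln2 = 0.6931471805599453q;   // literal gets KFmode or TFmode

   Whether TFmode or KFmode is chosen follows FLOAT128_IEEE_P (TFmode),
   exactly as in the return statement above.  */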
/* Target hook for invalid_arg_for_unprototyped_fn.  */

static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl,
                                 const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
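/* Sketch of what the offset means (standard AddressSanitizer mapping, shown
   only for context): shadow = (addr >> 3) + (1 << 41) on 64-bit targets and
   (addr >> 3) + (1 << 29) on 32-bit ones, i.e. offsets of 0x20000000000 and
   0x20000000 respectively.  */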
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  HOST_WIDE_INT mask;		/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec", OPTION_MASK_ALTIVEC, false, true },
  { "cmpb", OPTION_MASK_CMPB, false, true },
  { "crypto", OPTION_MASK_CRYPTO, false, true },
  { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
  { "dlmzb", OPTION_MASK_DLMZB, false, true },
  { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
    false, true },
  { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
  { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
  { "fprnd", OPTION_MASK_FPRND, false, true },
  { "hard-dfp", OPTION_MASK_DFP, false, true },
  { "htm", OPTION_MASK_HTM, false, true },
  { "isel", OPTION_MASK_ISEL, false, true },
  { "mfcrf", OPTION_MASK_MFCRF, false, true },
  { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
  { "modulo", OPTION_MASK_MODULO, false, true },
  { "mulhw", OPTION_MASK_MULHW, false, true },
  { "multiple", OPTION_MASK_MULTIPLE, false, true },
  { "popcntb", OPTION_MASK_POPCNTB, false, true },
  { "popcntd", OPTION_MASK_POPCNTD, false, true },
  { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
  { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
  { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
  { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
  { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
  { "power9-misc", OPTION_MASK_P9_MISC, false, true },
  { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
  { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
  { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
  { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
  { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
  { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
  { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
  { "string", OPTION_MASK_STRING, false, true },
  { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
  { "update", OPTION_MASK_NO_UPDATE, true , true },
  { "vsx", OPTION_MASK_VSX, false, true },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64", OPTION_MASK_64BIT, false, false },
  { "aix32", OPTION_MASK_64BIT, true, false },
#else
  { "64", OPTION_MASK_64BIT, false, false },
  { "32", OPTION_MASK_64BIT, true, false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi", OPTION_MASK_EABI, false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
  { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
#endif
  { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
  { "string", OPTION_MASK_STRING, false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec", RS6000_BTM_ALTIVEC, false, false },
  { "vsx", RS6000_BTM_VSX, false, false },
  { "paired", RS6000_BTM_PAIRED, false, false },
  { "fre", RS6000_BTM_FRE, false, false },
  { "fres", RS6000_BTM_FRES, false, false },
  { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
  { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
  { "popcntd", RS6000_BTM_POPCNTD, false, false },
  { "cell", RS6000_BTM_CELL, false, false },
  { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
  { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
  { "power9-misc", RS6000_BTM_P9_MISC, false, false },
  { "crypto", RS6000_BTM_CRYPTO, false, false },
  { "htm", RS6000_BTM_HTM, false, false },
  { "hard-dfp", RS6000_BTM_DFP, false, false },
  { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
  { "long-double-128", RS6000_BTM_LDBL128, false, false },
  { "float128", RS6000_BTM_FLOAT128, false, false },
  { "float128-hw", RS6000_BTM_FLOAT128_HW, false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
  { "optimize-swaps",
    offsetof (struct gcc_options, x_rs6000_optimize_swaps),
    offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
  { "allow-movmisalign",
    offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
    offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
  { "sched-groups",
    offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
    offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
  { "always-hint",
    offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
    offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
  { "align-branch-targets",
    offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
    offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
  { "tls-markers",
    offsetof (struct gcc_options, x_tls_markers),
    offsetof (struct cl_target_option, x_tls_markers), },
  { "sched-epilog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "sched-prolog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
};
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;

          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        rs6000_isa_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec and disables -mavoid-indexed-addresses.  */
                        if (!invert && mask == OPTION_MASK_VSX)
                          {
                            mask |= OPTION_MASK_ALTIVEC;
                            TARGET_AVOID_XFORM = 0;
                          }

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          rs6000_isa_flags &= ~mask;
                        else
                          rs6000_isa_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                  if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                    {
                      size_t j = rs6000_opt_vars[i].global_offset;
                      *((int *) ((char *)&global_options + j)) = !invert;
                      error_p = false;
                      not_valid_p = false;
                      break;
                    }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s%qs%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s%qs%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }

  return ret;
}
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
          prefix = ", ";
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              rs6000_debug_target_options (value, prefix);
              prefix = ", ";
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize;
  tree new_target, new_optimize;
  tree func_optimize;

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  old_optimize = build_optimization_node (&global_options);
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node (&global_options);
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node (&global_options);

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

static bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node (&global_options);
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node (&global_options))
             == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;
  rs6000_activate_target_options (target_option_current_node);

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_rs6000_isa_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_rs6000_isa_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}
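/* Typical usage this hook services (user code, illustrative only):

     #pragma GCC push_options
     #pragma GCC target ("power9-vector")
     // code here sees the extra ISA flags and predefines such as _ARCH_PWR9
     #pragma GCC pop_options

   The pop arrives here with ARGS == NULL and POP_TARGET set.  */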
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Restore target's globals from NEW_TREE and invalidate the
   rs6000_previous_fndecl cache.  */

void
rs6000_activate_target_options (tree new_tree)
{
  cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
  if (TREE_TARGET_GLOBALS (new_tree))
    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
  else if (new_tree == target_option_default_node)
    restore_target_globals (&default_target_globals);
  else
    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  rs6000_previous_fndecl = NULL_TREE;
}
/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */

static void
rs6000_set_current_function (tree fndecl)
{
  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl == rs6000_previous_fndecl)
    return;

  tree old_tree;
  if (rs6000_previous_fndecl == NULL_TREE)
    old_tree = target_option_current_node;
  else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
  else
    old_tree = target_option_default_node;

  tree new_tree;
  if (fndecl == NULL_TREE)
    {
      if (old_tree != target_option_current_node)
        new_tree = target_option_current_node;
      else
        new_tree = NULL_TREE;
    }
  else
    {
      new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
      if (new_tree == NULL_TREE)
        new_tree = target_option_default_node;
    }

  if (TARGET_DEBUG_TARGET)
    {
      if (new_tree)
        {
          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
        }

      if (old_tree)
        {
          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
        }

      if (old_tree != NULL_TREE || new_tree != NULL_TREE)
        fprintf (stderr, "--------------------\n");
    }

  if (new_tree && old_tree != new_tree)
    rs6000_activate_target_options (new_tree);

  rs6000_previous_fndecl = fndecl;
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
                               struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
                                  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}

/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
                            ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
                            ptr->x_rs6000_isa_flags_explicit);
}
/* Helper function to print the current isa or misc options on a line.  */

static void
rs6000_print_options_internal (FILE *file,
                               int indent,
                               const char *string,
                               HOST_WIDE_INT flags,
                               const char *prefix,
                               const struct rs6000_opt_mask *opts,
                               size_t num_elements)
{
  size_t i;
  size_t start_column = 0;
  size_t cur_column;
  size_t max_column = 120;
  size_t prefix_len = strlen (prefix);
  size_t comma_len = 0;
  const char *comma = "";

  if (indent)
    start_column += fprintf (file, "%*s", indent, "");

  if (!flags)
    {
      fprintf (stderr, DEBUG_FMT_S, string, "<none>");
      return;
    }

  start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);

  /* Print the various mask options.  */
  cur_column = start_column;
  for (i = 0; i < num_elements; i++)
    {
      bool invert = opts[i].invert;
      const char *name = opts[i].name;
      const char *no_str = "";
      HOST_WIDE_INT mask = opts[i].mask;
      size_t len = comma_len + prefix_len + strlen (name);

      if (!invert)
        {
          if ((flags & mask) == 0)
            {
              no_str = "no-";
              len += sizeof ("no-") - 1;
            }

          flags &= ~mask;
        }
      else
        {
          if ((flags & mask) != 0)
            {
              no_str = "no-";
              len += sizeof ("no-") - 1;
            }

          flags |= mask;
        }

      cur_column += len;
      if (cur_column > max_column)
        {
          fprintf (stderr, ", \\\n%*s", (int)start_column, "");
          cur_column = start_column + len;
          comma = "";
        }

      fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
      comma = ", ";
      comma_len = sizeof (", ") - 1;
    }

  fputs ("\n", file);
}
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
                          HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
                                 &rs6000_opt_masks[0],
                                 ARRAY_SIZE (rs6000_opt_masks));
}

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
                              HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
                                 &rs6000_builtin_mask_names[0],
                                 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
   2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
   -mupper-regs-df, etc.).

   If the user used -mno-power8-vector, we need to turn off all of the implicit
   ISA 2.07 and 3.0 options that relate to the vector unit.

   If the user used -mno-power9-vector, we need to turn off all of the implicit
   ISA 3.0 options that relate to the vector unit.

   This function does not handle explicit options such as the user specifying
   -mdirect-move.  These are handled in rs6000_option_override_internal, and
   the appropriate error is given if needed.

   We return a mask of all of the implicit options that should not be enabled
   by default.  */

static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void)
{
  HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
  size_t i, j;

  static const struct {
    const HOST_WIDE_INT no_flag;	/* flag explicitly turned off.  */
    const HOST_WIDE_INT dep_flags;	/* flags that depend on this option.  */
    const char *const name;		/* name of the switch.  */
  } flags[] = {
    { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
    { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
    { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
  };

  for (i = 0; i < ARRAY_SIZE (flags); i++)
    {
      HOST_WIDE_INT no_flag = flags[i].no_flag;

      if ((rs6000_isa_flags & no_flag) == 0
          && (rs6000_isa_flags_explicit & no_flag) != 0)
        {
          HOST_WIDE_INT dep_flags = flags[i].dep_flags;
          HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
                                     & rs6000_isa_flags
                                     & dep_flags);

          if (set_flags)
            {
              for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
                if ((set_flags & rs6000_opt_masks[j].mask) != 0)
                  {
                    set_flags &= ~rs6000_opt_masks[j].mask;
                    error ("%<-mno-%s%> turns off %<-m%s%>",
                           flags[i].name,
                           rs6000_opt_masks[j].name);
                  }

              gcc_assert (!set_flags);
            }

          rs6000_isa_flags &= ~dep_flags;
          ignore_masks |= no_flag | dep_flags;
        }
    }

  return ignore_masks;
}
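/* Worked example of the dependency handling above (illustrative): compiling
   with "-mpower9-vector -mno-vsx" first reports that -mno-vsx turns off
   -mpower9-vector (an explicitly enabled dependent flag), then clears the
   whole dependent mask group and adds it to the returned ignore mask so
   rs6000_option_override_internal will not re-enable any of it.  */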
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
/* Return the clone id of the target we are compiling code for in a target
   clone.  The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
   the priority list for the target clones (ordered from lowest to
   highest).  */

static int
rs6000_clone_priority (tree fndecl)
{
  tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
  HOST_WIDE_INT isa_masks;
  int ret = CLONE_DEFAULT;
  tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
  const char *attrs_str = NULL;

  attrs = TREE_VALUE (TREE_VALUE (attrs));
  attrs_str = TREE_STRING_POINTER (attrs);

  /* Return priority zero for the default function.  Return the ISA needed for
     the function if it is not the default.  */
  if (strcmp (attrs_str, "default") != 0)
    {
      if (fn_opts == NULL_TREE)
        fn_opts = target_option_default_node;

      if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
        isa_masks = rs6000_isa_flags;
      else
        isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;

      for (ret = CLONE_MAX - 1; ret != 0; ret--)
        if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
          break;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
             get_decl_name (fndecl), ret);

  return ret;
}
/* This compares the priority of target features in function DECL1 and DECL2.
   It returns a positive value if DECL1 is higher priority, a negative value if
   DECL2 is higher priority and 0 if they are the same.  Note, priorities are
   ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0).  */

static int
rs6000_compare_version_priority (tree decl1, tree decl2)
{
  int priority1 = rs6000_clone_priority (decl1);
  int priority2 = rs6000_clone_priority (decl2);
  int ret = priority1 - priority2;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
             get_decl_name (decl1), get_decl_name (decl2), ret);

  return ret;
}
/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */

static tree
rs6000_get_function_versions_dispatcher (void *decl)
{
  tree fn = (tree) decl;
  struct cgraph_node *node = NULL;
  struct cgraph_node *default_node = NULL;
  struct cgraph_function_version_info *node_v = NULL;
  struct cgraph_function_version_info *first_v = NULL;

  tree dispatch_decl = NULL;

  struct cgraph_function_version_info *default_version_info = NULL;
  gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
             get_decl_name (fn));

  node = cgraph_node::get (fn);
  gcc_assert (node != NULL);

  node_v = node->function_version ();
  gcc_assert (node_v != NULL);

  if (node_v->dispatcher_resolver != NULL)
    return node_v->dispatcher_resolver;

  /* Find the default version and make it the first node.  */
  first_v = node_v;
  /* Go to the beginning of the chain.  */
  while (first_v->prev != NULL)
    first_v = first_v->prev;

  default_version_info = first_v;
  while (default_version_info != NULL)
    {
      const tree decl2 = default_version_info->this_node->decl;
      if (is_function_default_version (decl2))
        break;
      default_version_info = default_version_info->next;
    }

  /* If there is no default node, just return NULL.  */
  if (default_version_info == NULL)
    return NULL;

  /* Make default info the first node.  */
  if (first_v != default_version_info)
    {
      default_version_info->prev->next = default_version_info->next;
      if (default_version_info->next)
        default_version_info->next->prev = default_version_info->prev;
      first_v->prev = default_version_info;
      default_version_info->next = first_v;
      default_version_info->prev = NULL;
    }

  default_node = default_version_info->this_node;

#ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  error_at (DECL_SOURCE_LOCATION (default_node->decl),
            "target_clones attribute needs GLIBC (2.23 and newer) that "
            "exports hardware capability bits");
#else

  if (targetm.has_ifunc_p ())
    {
      struct cgraph_function_version_info *it_v = NULL;
      struct cgraph_node *dispatcher_node = NULL;
      struct cgraph_function_version_info *dispatcher_version_info = NULL;

      /* Right now, the dispatching is done via ifunc.  */
      dispatch_decl = make_dispatcher_decl (default_node->decl);

      dispatcher_node = cgraph_node::get_create (dispatch_decl);
      gcc_assert (dispatcher_node != NULL);
      dispatcher_node->dispatcher_function = 1;
      dispatcher_version_info
        = dispatcher_node->insert_new_function_version ();
      dispatcher_version_info->next = default_version_info;
      dispatcher_node->definition = 1;

      /* Set the dispatcher for all the versions.  */
      it_v = default_version_info;
      while (it_v != NULL)
        {
          it_v->dispatcher_resolver = dispatch_decl;
          it_v = it_v->next;
        }
    }
  else
    {
      error_at (DECL_SOURCE_LOCATION (default_node->decl),
                "multiversioning needs ifunc which is not supported "
                "on this target");
    }
#endif

  return dispatch_decl;
}
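/* User-level picture of what the dispatcher machinery above builds
   (illustrative only):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long sum (const long *a, long n);

   The front end emits one clone per variant plus an ifunc symbol whose
   resolver (created below) picks a clone at load time, testing hardware
   support via __builtin_cpu_supports.  */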
/* Make the resolver function decl to dispatch the versions of a multi-
   versioned function, DEFAULT_DECL.  Create an empty basic block in the
   resolver and store the pointer in EMPTY_BB.  Return the decl of the resolver
   function.  */

static tree
make_resolver_func (const tree default_decl,
                    const tree dispatch_decl,
                    basic_block *empty_bb)
{
  /* Make the resolver function static.  The resolver function returns
     void *.  */
  tree decl_name = clone_function_name (default_decl, "resolver");
  const char *resolver_name = IDENTIFIER_POINTER (decl_name);
  tree type = build_function_type_list (ptr_type_node, NULL_TREE);
  tree decl = build_fn_decl (resolver_name, type);
  SET_DECL_ASSEMBLER_NAME (decl, decl_name);

  DECL_NAME (decl) = decl_name;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;

  /* Resolver is not external, body is generated.  */
  DECL_EXTERNAL (decl) = 0;
  DECL_EXTERNAL (dispatch_decl) = 0;

  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  DECL_STATIC_CONSTRUCTOR (decl) = 0;

  /* Build result decl and add to function_decl.  */
  tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  gimplify_function_tree (decl);
  push_cfun (DECL_STRUCT_FUNCTION (decl));
  *empty_bb = init_lowered_empty_function (decl, false,
                                           profile_count::uninitialized ());

  cgraph_node::add_new_function (decl, true);
  symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));

  pop_cfun ();

  /* Mark dispatch_decl as "ifunc" with resolver as resolver_name.  */
  DECL_ATTRIBUTES (dispatch_decl)
    = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));

  cgraph_node::create_same_body_alias (dispatch_decl, decl);

  return decl;
}
/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
   return a pointer to VERSION_DECL if we are running on a machine that
   supports the index CLONE_ISA hardware architecture bits.  This function will
   be called during version dispatch to decide which function version to
   execute.  It returns the basic block at the end, to which more conditions
   can be added.  */

static basic_block
add_condition_to_bb (tree function_decl, tree version_decl,
                     int clone_isa, basic_block new_bb)
{
  push_cfun (DECL_STRUCT_FUNCTION (function_decl));

  gcc_assert (new_bb != NULL);
  gimple_seq gseq = bb_seq (new_bb);

  tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
                              build_fold_addr_expr (version_decl));
  tree result_var = create_tmp_var (ptr_type_node);
  gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
  gimple *return_stmt = gimple_build_return (result_var);

  if (clone_isa == CLONE_DEFAULT)
    {
      gimple_seq_add_stmt (&gseq, convert_stmt);
      gimple_seq_add_stmt (&gseq, return_stmt);
      set_bb_seq (new_bb, gseq);
      gimple_set_bb (convert_stmt, new_bb);
      gimple_set_bb (return_stmt, new_bb);
      pop_cfun ();
      return new_bb;
    }

  tree bool_zero = build_int_cst (bool_int_type_node, 0);
  tree cond_var = create_tmp_var (bool_int_type_node);
  tree predicate_decl = rs6000_builtin_decls[(int) RS6000_BUILTIN_CPU_SUPPORTS];
  const char *arg_str = rs6000_clone_map[clone_isa].name;
  tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
  gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
  gimple_call_set_lhs (call_cond_stmt, cond_var);

  gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (call_cond_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, call_cond_stmt);

  gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
                                            NULL_TREE, NULL_TREE);
  gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (if_else_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, if_else_stmt);

  gimple_seq_add_stmt (&gseq, convert_stmt);
  gimple_seq_add_stmt (&gseq, return_stmt);
  set_bb_seq (new_bb, gseq);

  basic_block bb1 = new_bb;
  edge e12 = split_block (bb1, if_else_stmt);
  basic_block bb2 = e12->dest;
  e12->flags &= ~EDGE_FALLTHRU;
  e12->flags |= EDGE_TRUE_VALUE;

  edge e23 = split_block (bb2, return_stmt);
  gimple_set_bb (convert_stmt, bb2);
  gimple_set_bb (return_stmt, bb2);

  basic_block bb3 = e23->dest;
  make_edge (bb1, bb3, EDGE_FALSE_VALUE);

  remove_edge (e23);
  make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);

  pop_cfun ();

  return bb3;
}
/* This function generates the dispatch function for multi-versioned functions.
   DISPATCH_DECL is the function which will contain the dispatch logic.
   FNDECLS are the function choices for dispatch, and is a tree chain.
   EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
   code is generated.  */

static int
dispatch_function_versions (tree dispatch_decl,
                            void *fndecls_p,
                            basic_block *empty_bb)
{
  int ix;
  tree ele;
  vec<tree> *fndecls;
  tree clones[CLONE_MAX];

  if (TARGET_DEBUG_TARGET)
    fputs ("dispatch_function_versions, top\n", stderr);

  gcc_assert (dispatch_decl != NULL
              && fndecls_p != NULL
              && empty_bb != NULL);

  /* fndecls_p is actually a vector.  */
  fndecls = static_cast<vec<tree> *> (fndecls_p);

  /* At least one more version other than the default.  */
  gcc_assert (fndecls->length () >= 2);

  /* The first version in the vector is the default decl.  */
  memset ((void *) clones, '\0', sizeof (clones));
  clones[CLONE_DEFAULT] = (*fndecls)[0];

  /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
     on the PowerPC (on the x86_64, it is not a NOP).  The builtin function
     __builtin_cpu_support ensures that the TOC fields are setup by requiring a
     recent glibc.  If we ever need to call __builtin_cpu_init, we would need
     to insert the code here to do the call.  */

  for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
    {
      int priority = rs6000_clone_priority (ele);
      if (!clones[priority])
        clones[priority] = ele;
    }

  for (ix = CLONE_MAX - 1; ix >= 0; ix--)
    if (clones[ix])
      {
        if (TARGET_DEBUG_TARGET)
          fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
                   ix, get_decl_name (clones[ix]));

        *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
                                         *empty_bb);
      }

  return 0;
}
/* Generate the dispatching code body to dispatch multi-versioned function
   DECL.  The target hook is called to process the "target" attributes and
   provide the code to dispatch the right function at run-time.  NODE points
   to the dispatcher decl whose body will be created.  */

static tree
rs6000_generate_version_dispatcher_body (void *node_p)
{
  tree resolver;
  basic_block empty_bb;
  struct cgraph_node *node = (cgraph_node *) node_p;
  struct cgraph_function_version_info *ninfo = node->function_version ();

  if (ninfo->dispatcher_resolver)
    return ninfo->dispatcher_resolver;

  /* node is going to be an alias, so remove the finalized bit.  */
  node->definition = false;

  /* The first version in the chain corresponds to the default version.  */
  ninfo->dispatcher_resolver = resolver
    = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
             get_decl_name (resolver));

  push_cfun (DECL_STRUCT_FUNCTION (resolver));
  auto_vec<tree, 2> fn_ver_vec;

  for (struct cgraph_function_version_info *vinfo = ninfo->next;
       vinfo;
       vinfo = vinfo->next)
    {
      struct cgraph_node *version = vinfo->this_node;
      /* Check for virtual functions here again, as by this time it should
         have been determined if this function needs a vtable index or
         not.  This happens for methods in derived classes that override
         virtual methods in base classes but are not explicitly marked as
         virtual.  */
      if (DECL_VINDEX (version->decl))
        sorry ("Virtual function multiversioning not supported");

      fn_ver_vec.safe_push (version->decl);
    }

  dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
  cgraph_edge::rebuild_edges ();
  pop_cfun ();
  return resolver;
}
37795 rs6000_can_inline_p (tree caller
, tree callee
)
37798 tree caller_tree
= DECL_FUNCTION_SPECIFIC_TARGET (caller
);
37799 tree callee_tree
= DECL_FUNCTION_SPECIFIC_TARGET (callee
);
37801 /* If callee has no option attributes, then it is ok to inline. */
37805 /* If caller has no option attributes, but callee does then it is not ok to
37807 else if (!caller_tree
)
37812 struct cl_target_option
*caller_opts
= TREE_TARGET_OPTION (caller_tree
);
37813 struct cl_target_option
*callee_opts
= TREE_TARGET_OPTION (callee_tree
);
37815 /* Callee's options should a subset of the caller's, i.e. a vsx function
37816 can inline an altivec function but a non-vsx function can't inline a
37818 if ((caller_opts
->x_rs6000_isa_flags
& callee_opts
->x_rs6000_isa_flags
)
37819 == callee_opts
->x_rs6000_isa_flags
)
37823 if (TARGET_DEBUG_TARGET
)
37824 fprintf (stderr
, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37825 get_decl_name (caller
), get_decl_name (callee
),
37826 (ret
? "can" : "cannot"));
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (machine_mode mode,
                            bool offsettable_p,
                            bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = reload_completed;

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
          && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
        stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, reload_completed)
      && ! legitimate_indexed_address_p (addr, reload_completed))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
        {
          rtx reg = XEXP (addr, 0);
          HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
          rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
          gcc_assert (REG_P (reg));
          emit_insn (gen_add3_insn (reg, reg, size_rtx));
          addr = reg;
        }
      else if (GET_CODE (addr) == PRE_MODIFY)
        {
          rtx reg = XEXP (addr, 0);
          rtx expr = XEXP (addr, 1);
          gcc_assert (REG_P (reg));
          gcc_assert (GET_CODE (expr) == PLUS);
          emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
          addr = reg;
        }

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);

      if (!legitimate_indexed_address_p (addr, reload_completed)
          && !legitimate_indirect_address_p (addr, reload_completed))
        addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable; most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
          || GET_MODE (x) == VOIDmode
          || (TARGET_POWERPC64 && mode == DImode)
          || easy_fp_constant (x, mode)
          || easy_vector_constant (x, mode));
}
/* Return TRUE iff the sequence ending in LAST sets the static chain.  */

static bool
chain_already_loaded (rtx_insn *last)
{
  for (; last != NULL; last = PREV_INSN (last))
    {
      if (NONJUMP_INSN_P (last))
        {
          rtx patt = PATTERN (last);

          if (GET_CODE (patt) == SET)
            {
              rtx lhs = XEXP (patt, 0);

              if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
                return true;
            }
        }
    }
  return false;
}
/* Expand code to perform a call under the AIX or ELFv2 ABI.  */

void
rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  const bool direct_call_p
    = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
  rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
  rtx toc_load = NULL_RTX;
  rtx toc_restore = NULL_RTX;
  rtx func_addr;
  rtx abi_reg = NULL_RTX;
  rtx call[4];
  int n_call;
  rtx insn;

  /* Handle longcall attributes.  */
  if (INTVAL (cookie) & CALL_LONG)
    func_desc = rs6000_longcall_ref (func_desc);

  /* Handle indirect calls.  */
  if (GET_CODE (func_desc) != SYMBOL_REF
      || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
    {
      /* Save the TOC into its reserved slot before the call,
         and prepare to restore it after the call.  */
      rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
      rtx stack_toc_mem = gen_frame_mem (Pmode,
                                         gen_rtx_PLUS (Pmode, stack_ptr,
                                                       stack_toc_offset));
      rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
                                             gen_rtvec (1, stack_toc_offset),
                                             UNSPEC_TOCSLOT);
      toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);

      /* Can we optimize saving the TOC in the prologue or
         do we need to do it at every call?  */
      if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
        cfun->machine->save_toc_in_prologue = true;
      else
        {
          MEM_VOLATILE_P (stack_toc_mem) = 1;
          emit_move_insn (stack_toc_mem, toc_reg);
        }

      if (DEFAULT_ABI == ABI_ELFv2)
        {
          /* A function pointer in the ELFv2 ABI is just a plain address, but
             the ABI requires it to be loaded into r12 before the call.  */
          func_addr = gen_rtx_REG (Pmode, 12);
          emit_move_insn (func_addr, func_desc);
          abi_reg = func_addr;
        }
      else
        {
          /* A function pointer under AIX is a pointer to a data area whose
             first word contains the actual address of the function, whose
             second word contains a pointer to its TOC, and whose third word
             contains a value to place in the static chain register (r11).
             Note that if we load the static chain, our "trampoline" need
             not have any executable code.  */

          /* Load up address of the actual function.  */
          func_desc = force_reg (Pmode, func_desc);
          func_addr = gen_reg_rtx (Pmode);
          emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

          /* Prepare to load the TOC of the called function.  Note that the
             TOC load must happen immediately before the actual call so
             that unwinding the TOC registers works correctly.  See the
             comment in frob_update_context.  */
          rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
          rtx func_toc_mem = gen_rtx_MEM (Pmode,
                                          gen_rtx_PLUS (Pmode, func_desc,
                                                        func_toc_offset));
          toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);

          /* If we have a static chain, load it up.  But, if the call was
             originally direct, the 3rd word has not been written since no
             trampoline has been built, so we ought not to load it, lest we
             override a static chain value.  */
          if (!direct_call_p
              && TARGET_POINTERS_TO_NESTED_FUNCTIONS
              && !chain_already_loaded (get_current_sequence ()->next->last))
            {
              rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
              rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
              rtx func_sc_mem = gen_rtx_MEM (Pmode,
                                             gen_rtx_PLUS (Pmode, func_desc,
                                                           func_sc_offset));
              emit_move_insn (sc_reg, func_sc_mem);
              abi_reg = sc_reg;
            }
        }
    }
  else
    {
      /* Direct calls use the TOC: for local calls, the callee will
         assume the TOC register is set; for non-local calls, the
         PLT stub needs the TOC register.  */
      abi_reg = toc_reg;
      func_addr = func_desc;
    }

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);
  n_call = 1;

  if (toc_load)
    call[n_call++] = toc_load;
  if (toc_restore)
    call[n_call++] = toc_restore;

  call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
  insn = emit_call_insn (insn);

  /* Mention all registers defined by the ABI to hold information
     as uses in CALL_INSN_FUNCTION_USAGE.  */
  if (abi_reg)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
}
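/* Illustrative layout of the AIX/ELFv1 function descriptor walked by the
   indirect-call path above (a sketch for exposition, not a declaration the
   compiler itself uses):

     struct aix_func_desc_sketch {
       void *entry;         /* word 0: code address used for the call      */
       void *toc;           /* word 1: callee's TOC value, reloaded into r2 */
       void *static_chain;  /* word 2: optional value placed in r11         */
     };
*/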
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif
/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
    {
      cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  allocate_struct_function (decl, false);
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);

  /* Make sure the hard reg set doesn't include r2, which was possibly added
     via PIC_OFFSET_TABLE_REGNUM.  */
  if (TARGET_TOC)
    remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_inner (rtx dest,
			    rtx op1,
			    rtx op2,
			    enum rtx_code code,
			    machine_mode mode,
			    bool complement_final_p,
			    bool complement_op1_p,
			    bool complement_op2_p)
{
  rtx bool_rtx;

  /* Optimize AND of 0/0xffffffff and IOR/XOR of 0.  */
  if (op2 && GET_CODE (op2) == CONST_INT
      && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      HOST_WIDE_INT value = INTVAL (op2) & mask;

      /* Optimize AND of 0 to just set 0.  Optimize AND of -1 to be a move.  */
      if (code == AND)
	{
	  if (value == 0)
	    {
	      emit_insn (gen_rtx_SET (dest, const0_rtx));
	      return;
	    }

	  else if (value == mask)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}

      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
      else if (code == IOR || code == XOR)
	{
	  if (value == 0)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}
    }

  if (code == AND && mode == SImode
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      emit_insn (gen_andsi3 (dest, op1, op2));
      return;
    }

  if (complement_op1_p)
    op1 = gen_rtx_NOT (mode, op1);

  if (complement_op2_p)
    op2 = gen_rtx_NOT (mode, op2);

  /* For canonical RTL, if only one arm is inverted it is the first.  */
  if (!complement_op1_p && complement_op2_p)
    std::swap (op1, op2);

  bool_rtx = ((code == NOT)
	      ? gen_rtx_NOT (mode, op1)
	      : gen_rtx_fmt_ee (code, mode, op1, op2));

  if (complement_final_p)
    bool_rtx = gen_rtx_NOT (mode, bool_rtx);

  emit_insn (gen_rtx_SET (dest, bool_rtx));
}
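
/* As a rough guide (assuming plain GPR operands), the complement flags let a
   single call describe the PowerPC extended logical forms, e.g.:

     code = AND, complement_op2_p	-> andc  dest,op1,op2
     code = IOR, complement_op2_p	-> orc   dest,op1,op2
     code = AND, complement_final_p	-> nand  dest,op1,op2
     code = IOR, complement_final_p	-> nor   dest,op1,op2
     code = XOR, complement_final_p	-> eqv   dest,op1,op2

   The RTL emitted here is expected to be matched later by the corresponding
   boolean patterns in rs6000.md.  */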
/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_di (rtx operands[3],
			 enum rtx_code code,
			 bool complement_final_p,
			 bool complement_op1_p,
			 bool complement_op2_p)
{
  const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
  const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
  const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
  enum hi_lo  { hi = 0, lo = 1 };
  rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
  size_t i;

  op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
  op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
  op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
  op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);

  if (code == NOT)
    op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
  else
    {
      if (GET_CODE (operands[2]) != CONST_INT)
	{
	  op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
	  op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
	}
      else
	{
	  HOST_WIDE_INT value = INTVAL (operands[2]);
	  HOST_WIDE_INT value_hi_lo[2];

	  gcc_assert (!complement_final_p);
	  gcc_assert (!complement_op1_p);
	  gcc_assert (!complement_op2_p);

	  value_hi_lo[hi] = value >> 32;
	  value_hi_lo[lo] = value & lower_32bits;

	  for (i = 0; i < 2; i++)
	    {
	      HOST_WIDE_INT sub_value = value_hi_lo[i];

	      if (sub_value & sign_bit)
		sub_value |= upper_32bits;

	      op2_hi_lo[i] = GEN_INT (sub_value);

	      /* If this is an AND instruction, check to see if we need to load
		 the value in a register.  */
	      if (code == AND && sub_value != -1 && sub_value != 0
		  && !and_operand (op2_hi_lo[i], SImode))
		op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
	    }
	}
    }

  for (i = 0; i < 2; i++)
    {
      /* Split large IOR/XOR operations.  */
      if ((code == IOR || code == XOR)
	  && GET_CODE (op2_hi_lo[i]) == CONST_INT
	  && !complement_final_p
	  && !complement_op1_p
	  && !complement_op2_p
	  && !logical_const_operand (op2_hi_lo[i], SImode))
	{
	  HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
	  HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
	  HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
	  rtx tmp = gen_reg_rtx (SImode);

	  /* Make sure the constant is sign extended.  */
	  if ((hi_16bits & sign_bit) != 0)
	    hi_16bits |= upper_32bits;

	  rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
				      code, SImode, false, false, false);

	  rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
				      code, SImode, false, false, false);
	}
      else
	rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
				    code, SImode, complement_final_p,
				    complement_op1_p, complement_op2_p);
    }

  return;
}
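
/* A sketch of how this plays out for, say, a DImode IOR with the constant
   0x123456789abcdef0 on a 32-bit target: the constant is split into the
   SImode halves 0x12345678 and 0x9abcdef0 (the low half is sign-extended
   because its sign bit is set).  Neither half fits a single ori/oris, so
   rs6000_split_logical_inner is called twice per half, producing for the
   high words roughly:

       oris  tmp,src_hi,0x1234
       ori   dst_hi,tmp,0x5678  */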
/* Split the insns that make up boolean operations operating on multiple GPR
   registers.  The boolean MD patterns ensure that the inputs either are
   exactly the same as the output registers, or there is no overlap.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

void
rs6000_split_logical (rtx operands[3],
		      enum rtx_code code,
		      bool complement_final_p,
		      bool complement_op1_p,
		      bool complement_op2_p)
{
  machine_mode mode = GET_MODE (operands[0]);
  machine_mode sub_mode;
  rtx op0, op1, op2;
  int sub_size, regno0, regno1, nregs, i;

  /* If this is DImode, use the specialized version that can run before
     register allocation.  */
  if (mode == DImode && !TARGET_POWERPC64)
    {
      rs6000_split_logical_di (operands, code, complement_final_p,
			       complement_op1_p, complement_op2_p);
      return;
    }

  op0 = operands[0];
  op1 = operands[1];
  op2 = (code == NOT) ? NULL_RTX : operands[2];
  sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
  sub_size = GET_MODE_SIZE (sub_mode);
  regno0 = REGNO (op0);
  regno1 = REGNO (op1);

  gcc_assert (reload_completed);
  gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
  gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));

  nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
  gcc_assert (nregs > 1);

  if (op2 && REG_P (op2))
    gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));

  for (i = 0; i < nregs; i++)
    {
      int offset = i * sub_size;
      rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
      rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
      rtx sub_op2 = ((code == NOT)
		     ? NULL_RTX
		     : simplify_subreg (sub_mode, op2, mode, offset));

      rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
				  complement_final_p, complement_op1_p,
				  complement_op2_p);
    }

  return;
}
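
/* For instance, a TImode AND on a 64-bit target occupies two consecutive
   GPRs (nregs == 2), so the loop above emits two independent DImode AND
   operations, one for each register of the pair.  */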
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together on
   a power8.  */

bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!base_reg_operand (target, GET_MODE (target)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Allow sign/zero extension.  */
  if (GET_CODE (mem) == ZERO_EXTEND
      || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
    mem = XEXP (mem, 0);

  if (!MEM_P (mem))
    return false;

  if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
    return false;

  /* Validate that the register used to load the high value is either the
     register being loaded, or we can safely replace its use.

     This function is only called from the peephole2 pass and we assume that
     there are 2 instructions in the peephole (addis and load), so we want to
     check if the target register was not used in the memory address and the
     register to hold the addis result is dead after the peephole.  */
  if (REGNO (addis_reg) != REGNO (target))
    {
      if (reg_mentioned_p (target, mem))
	return false;

      if (!peep2_reg_dead_p (2, addis_reg))
	return false;

      /* If the target register being loaded is the stack pointer, we must
	 avoid loading any other value into it, even temporarily.  */
      if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
	return false;
    }

  base_reg = XEXP (addr, 0);
  return REGNO (addis_reg) == REGNO (base_reg);
}
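
/* The peephole is looking for a two-instruction sequence such as

       addis 9,2,.LC0@toc@ha
       lwz   9,.LC0@toc@l(9)

   where the addis result is only used as the base of the dependent D-form
   load; power8 can fuse such a pair when -mpower8-fusion is in effect.  */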
/* During the peephole2 pass, adjust and expand the insns for a load fusion
   sequence.  We adjust the addis register to use the target register.  If the
   load sign extends, we adjust the code to do the zero extending load, and an
   explicit sign extension later since the fusion only covers zero extending
   loads.

   The operands are:
	operands[0]	register set with addis (to be replaced with target)
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].  */

void
expand_fusion_gpr_load (rtx *operands)
{
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == ZERO_EXTEND
      || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_GPR);
  emit_insn (gen_rtx_SET (target, new_mem));

  if (extend == SIGN_EXTEND)
    {
      int sub_off = ((BYTES_BIG_ENDIAN)
		     ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
		     : 0);
      rtx sign_reg
	= simplify_subreg (target_mode, target, extend_mode, sub_off);

      emit_insn (gen_rtx_SET (target,
			      gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
    }

  return;
}
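
/* For a sign-extending load, the replacement sequence therefore looks
   roughly like this for a short loaded through the TOC:

       addis 9,2,sym@toc@ha
       lhz   9,sym@toc@l(9)	# fusible zero-extending load
       extsh 9,9		# explicit sign extension emitted above  */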
/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
		   const char *mode_name)
{
  rtx fuse_ops[10];
  char insn_template[80];
  const char *addis_str = NULL;
  const char *comment_str = ASM_COMMENT_START;

  if (*comment_str == ' ')
    comment_str++;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);		/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);		/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
	   comment, mode_name);
  output_asm_insn (insn_template, fuse_ops);
}
/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

void
emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
			const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_store_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);
}
/* Wrap a TOC address that can be fused to indicate that special fusion
   processing is needed.  */

rtx
fusion_wrap_memory_address (rtx old_mem)
{
  rtx old_addr = XEXP (old_mem, 0);
  rtvec v = gen_rtvec (1, old_addr);
  rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
  return replace_equiv_address_nv (old_mem, new_addr, false);
}
/* Given an address, convert it into the addis and load offset parts.  Addresses
   created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))

   Addresses created via toc fusion look like:
	(unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
    {
      lo = XVECEXP (addr, 0, 0);
      hi = gen_rtx_HIGH (Pmode, lo);
    }
  else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
/* Return a string to fuse an addis instruction with a gpr load to the same
   register that we loaded up the addis instruction.  The address that is used
   is the logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   Or the address is the TOC address that is wrapped before register allocation:
	(unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  const char *mode_name = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      mode_name = "char";
      load_str = "lbz";
      break;

    case E_HImode:
      mode_name = "short";
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      mode_name = (mode == SFmode) ? "float" : "int";
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      mode_name = (mode == DFmode) ? "double" : "long";
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (target, target, load_offset, load_str);

  return "";
}
/* Return true if the peephole2 can combine a load/store involving a
   combination of an addis instruction and the memory operation.  This was
   added to the ISA 3.0 (power9) hardware.  */

bool
fusion_p9_p (rtx addis_reg,		/* register set via addis.  */
	     rtx addis_value,		/* addis value.  */
	     rtx dest,			/* destination (memory or register).  */
	     rtx src)			/* source (register or memory).  */
{
  rtx addr, mem, offset;
  machine_mode mode = GET_MODE (src);

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Ignore extend operations that are part of the load.  */
  if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
    src = XEXP (src, 0);

  /* Test for memory<-register or register<-memory.  */
  if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
    {
      if (!MEM_P (dest))
	return false;

      mem = dest;
    }

  else if (MEM_P (src))
    {
      if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
	return false;

      mem = src;
    }

  else
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) == PLUS)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
	return false;

      return satisfies_constraint_I (XEXP (addr, 1));
    }

  else if (GET_CODE (addr) == LO_SUM)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
	return false;

      offset = XEXP (addr, 1);
      if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
	return small_toc_ref (offset, GET_MODE (offset));

      else if (TARGET_ELF && !TARGET_POWERPC64)
	return CONSTANT_P (offset);
    }

  return false;
}
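
/* Unlike the power8 form above, the candidate here may load into (or store
   from) a register other than the addis temporary, including FP and vector
   registers, e.g.:

       addis 9,2,.LC2@toc@ha
       lfd   0,.LC2@toc@l(9)

   provided r9 is dead after the pair.  */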
/* During the peephole2 pass, adjust and expand the insns for an extended fusion
   load sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same as
   the base register.  At the moment, we only do this if register set with addis
   is dead.  */

void
expand_fusion_p9_load (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (target, new_mem);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
/* During the peephole2 pass, adjust and expand the insns for an extended fusion
   store sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target D-form memory being stored to
	operands[3]	register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same as
   the base register.  At the moment, we only do this if register set with addis
   is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
			    UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
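
/* Both expanders above emit a single PARALLEL of the form

       (parallel [(set <dest> (unspec [<src>] UNSPEC_FUSION_P9))
		  (clobber <tmp_reg>)])

   so that the scratch register used for the addis half stays claimed until
   the fused sequence is finally output by emit_fusion_p9_load/store
   below.  */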
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2:  (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
	load_string = "lfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
	load_string = "lxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  load_string = "lbz";
	  break;
	case E_HImode:
	  load_string = "lhz";
	  break;
	case E_SImode:
	case E_SFmode:
	  load_string = "lwz";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  load_string = "ld";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2:  (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	store_string = "stfs";
      else if (mode == DFmode)
	store_string = "stfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
	store_string = "stxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  store_string = "stb";
	  break;
	case E_HImode:
	  store_string = "sth";
	  break;
	case E_SImode:
	case E_SFmode:
	  store_string = "stw";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  store_string = "std";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that zeroes the entire FPSCR image held in the low 32 bits
     (exception flags, enables, and rounding modes).  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
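
/* The three sequences built above are used by the middle end when expanding
   C11 atomic compound assignment on floating-point types: roughly, *HOLD is
   emitted before the compare-and-exchange loop, *CLEAR on the failure path
   of each iteration, and *UPDATE once the store has succeeded, mirroring
   feholdexcept / feclearexcept / feupdateenv.  */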
void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
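
/* Tracing the big-endian path above as an illustration: with
   src1 = {a0, a1} and src2 = {b0, b1} (V2DI), the xxpermdi pair produces
   {a0, b0} and {a1, b1}, the conversions place their single-precision
   results in the even word slots, and the final vmrgew interleaves them so
   that dst = {f(a0), f(a1), f(b0), f(b1)}.  */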
void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}
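
/* E.g. string constants get at least word alignment here, so copying from a
   string literal (memcpy/strcpy of a constant) can move a word at a time
   instead of byte by byte.  */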
/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"