/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-vector-builder.h"
#include "tree-pass.h"
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#include "gstab.h"  /* for N_SLINE */
#include "case-cfn-macros.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;
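
/* Note: the per-function frame layout described by an rs6000_stack_t is
   produced by rs6000_stack_info () (declared later in this file); the
   reload_completed field above records the point after which this layout can
   no longer change.  */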
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
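
/* Illustrative only: with the table above, -mrecip=divf,rsqrtd turns on
   (RECIP_SF_DIV | RECIP_V4SF_DIV) and (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT),
   i.e. fre-style divide estimates for single precision and frsqrte-style
   square-root estimates for double precision, while -mrecip=all selects
   RECIP_ALL.  */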
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
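
/* Illustrative use from user code (not part of this file):

     if (__builtin_cpu_is ("power9"))
       use_power9_path ();

   The string argument is matched against the names in the table above, and
   the builtin compares the corresponding PPC_PLATFORM_* value against the
   AT_PLATFORM value provided by the C library.  */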
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
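
/* Illustrative use from user code (not part of this file):

     if (__builtin_cpu_supports ("arch_2_07"))
       use_isa_2_07_path ();

   tests the PPC_FEATURE2_ARCH_2_07 bit; entries with id 0 above are looked up
   in AT_HWCAP, and entries with id 1 in AT_HWCAP2.  */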
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },					/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,			/* General purpose registers.  */
  RELOAD_REG_FPR,			/* Traditional floating point regs.  */
  RELOAD_REG_VMX,			/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,			/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;			/* Register class name.  */
  int reg;				/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */
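
/* As an illustration, a mode that is valid in a register class and allows
   both reg+reg and reg+offset addressing there would have an addr_mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET), i.e. 0x0d.  */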
/* Register type masks of the valid addressing modes, indexed by register
   type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
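
/* Illustrative only: address legitimization code elsewhere in this file can
   guard auto-increment forms with these helpers, along the lines of

     if (GET_CODE (addr) == PRE_INC && !mode_supports_pre_incdec_p (mode))
       return false;

   so that PRE_INC/PRE_DEC/PRE_MODIFY addresses are only accepted when the
   addr_mask bits computed for MODE allow them.  */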
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Processor costs (relative to an add) */
const struct processor_costs *rs6000_cost;
/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
  0,                        /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (8),    /* divsi */
  COSTS_N_INSNS (12),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (18),   /* ddiv */
  128,                  /* cache line size */
  8,                    /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};
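
/* Note: COSTS_N_INSNS (1) is roughly the cost of one add (see the comment
   above ppccell_cost), so an entry such as COSTS_N_INSNS (17) models the
   operation as about seventeen add-equivalents when the rtx cost hooks
   consult these tables through the rs6000_cost pointer above.  */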
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
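
/* Illustrative only: with the macro definitions above, a hypothetical entry
   in rs6000-builtin.def such as

     RS6000_BUILTIN_2 (FOO, "__builtin_foo", MASK, ATTR, CODE_FOR_foo)

   expands to { "__builtin_foo", CODE_FOR_foo, MASK, ATTR }, and therefore
   becomes one element of rs6000_builtin_info[].  */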
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							    machine_mode, rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							    enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t, reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode, reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
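
/* Illustrative user-level uses of the attributes above (not part of this
   file):

     void far_fn (void) __attribute__ ((longcall));
     typedef int vec_t __attribute__ ((altivec (vector__)));

   The handler functions named in the table validate such uses against the
   decl/type requirements recorded in the corresponding fields.  */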
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
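
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0) and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */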
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"

#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs

#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_TRANSLATE_MODE_ATTRIBUTE
#define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_FLOATN_MODE
#define TARGET_FLOATN_MODE rs6000_floatn_mode

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
  rs6000_builtin_md_vectorized_function

#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1828 #undef TARGET_STACK_PROTECT_FAIL
1829 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1837 /* Use a 32-bit anchor range.  This leads to sequences like:
1838 
1839 	addis	tmp,anchor,high
1840 	add	dest,tmp,low
1841 
1842    where tmp itself acts as an anchor, and can be shared between
1843    accesses to the same 64k page. */
1844 #undef TARGET_MIN_ANCHOR_OFFSET
1845 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1846 #undef TARGET_MAX_ANCHOR_OFFSET
1847 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
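/* Worked example (illustrative, not taken from this file): with the 32-bit
   anchor range above, an access at byte offset 0x12345678 from an anchor
   symbol can be split as high = 0x1234 and low = 0x5678, giving
   "addis tmp,anchor,0x1234" followed by a load or store at tmp+0x5678.
   Offsets whose low 16 bits are >= 0x8000 need the usual +1 adjustment of
   the high part because the D-form displacement is sign-extended.  */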
1848 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1849 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1850 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1851 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1853 #undef TARGET_BUILTIN_RECIPROCAL
1854 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1856 #undef TARGET_SECONDARY_RELOAD
1857 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED
1859 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1861 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1863 #undef TARGET_LEGITIMATE_ADDRESS_P
1864 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1866 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1867 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1869 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1870 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1872 #undef TARGET_CAN_ELIMINATE
1873 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1875 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1876 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1878 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1879 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1881 #undef TARGET_TRAMPOLINE_INIT
1882 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1884 #undef TARGET_FUNCTION_VALUE
1885 #define TARGET_FUNCTION_VALUE rs6000_function_value
1887 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1888 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1890 #undef TARGET_OPTION_SAVE
1891 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1893 #undef TARGET_OPTION_RESTORE
1894 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1896 #undef TARGET_OPTION_PRINT
1897 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1899 #undef TARGET_CAN_INLINE_P
1900 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1902 #undef TARGET_SET_CURRENT_FUNCTION
1903 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1905 #undef TARGET_LEGITIMATE_CONSTANT_P
1906 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1908 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1909 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1911 #undef TARGET_CAN_USE_DOLOOP_P
1912 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1914 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1915 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1917 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1918 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1919 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1920 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1921 #undef TARGET_UNWIND_WORD_MODE
1922 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1924 #undef TARGET_OFFLOAD_OPTIONS
1925 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1927 #undef TARGET_C_MODE_FOR_SUFFIX
1928 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1930 #undef TARGET_INVALID_BINARY_OP
1931 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1933 #undef TARGET_OPTAB_SUPPORTED_P
1934 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1936 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1937 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1939 #undef TARGET_COMPARE_VERSION_PRIORITY
1940 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1942 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1943 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1944 rs6000_generate_version_dispatcher_body
1946 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1947 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1948 rs6000_get_function_versions_dispatcher
1950 #undef TARGET_OPTION_FUNCTION_VERSIONS
1951 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1953 #undef TARGET_HARD_REGNO_NREGS
1954 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1955 #undef TARGET_HARD_REGNO_MODE_OK
1956 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1958 #undef TARGET_MODES_TIEABLE_P
1959 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1963 rs6000_hard_regno_call_part_clobbered
1965 #undef TARGET_SLOW_UNALIGNED_ACCESS
1966 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1968 #undef TARGET_CAN_CHANGE_MODE_CLASS
1969 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1971 #undef TARGET_CONSTANT_ALIGNMENT
1972 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1974 #undef TARGET_STARTING_FRAME_OFFSET
1975 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1977 #if TARGET_ELF && RS6000_WEAK
1978 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1979 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1980 #endif
1982 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1983 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1985 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1986 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt
  const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
        if (! strcmp (name, processor_target_table[i].name))
          return (int) i;
    }

  return -1;
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* 128-bit floating point usually takes 2 registers, unless it is IEEE
     128-bit floating point that can go in vector registers, which has VSX
     memory addressing.  */
  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
                ? UNITS_PER_VSX_WORD
                : UNITS_PER_FP_WORD);

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
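/* Worked example (illustrative): the round-up division above means a 16-byte
   value held in 8-byte FP registers needs (16 + 8 - 1) / 8 = 2 consecutive
   registers, while the same value placed in a 16-byte VSX register needs
   (16 + 16 - 1) / 16 = 1.  */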
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */

static int
rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  if (COMPLEX_MODE_P (mode))
    mode = GET_MODE_INNER (mode);

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
            && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
            && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
          || FLOAT128_VECTOR_P (mode)
          || reg_addr[mode].scalar_in_vmx_p
          || mode == TImode
          || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
        return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
        {
          if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
            return 0;

          return ALTIVEC_REGNO_P (last_regno);
        }
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (FLOAT128_VECTOR_P (mode))
        return false;

      if (SCALAR_FLOAT_MODE_P (mode)
          && (mode != TDmode || (regno % 2) == 0)
          && FP_REGNO_P (last_regno))
        return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT)
        {
          if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
            return 1;

          if (TARGET_P8_VECTOR && (mode == SImode))
            return 1;

          if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
            return 1;
        }

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec modes can go only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
            || mode == V1TImode);

  /* We cannot put non-VSX TImode or PTImode anywhere except general register
     and it must be able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
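/* Illustrative examples of the checks above (not taken from this file):
   PTImode in GPRs requires an even/odd pair, so regno 4 passes and regno 5
   fails the (regno & 1) == 0 test; a value needing four GPRs that starts in
   r30 fails the INT_REGNO_P (last_regno) test because it would spill past
   r31.  */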
/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_nregs[mode][regno];
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_mode_ok_p[mode][regno];
}
/* Implement TARGET_MODES_TIEABLE_P.

   PTImode cannot tie with other modes because PTImode is restricted to even
   GPR registers, and TImode can go in any GPR as well as VSX registers (PR

   Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
   128-bit floating point on VSX systems ties with other vectors.  */

static bool
rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  if (mode1 == PTImode)
    return mode2 == PTImode;
  if (mode2 == PTImode)
    return false;

  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
    return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
    return false;

  if (SCALAR_FLOAT_MODE_P (mode1))
    return SCALAR_FLOAT_MODE_P (mode2);
  if (SCALAR_FLOAT_MODE_P (mode2))
    return false;

  if (GET_MODE_CLASS (mode1) == MODE_CC)
    return GET_MODE_CLASS (mode2) == MODE_CC;
  if (GET_MODE_CLASS (mode2) == MODE_CC)
    return false;

  return true;
}
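/* Illustrative example (not from this file): under the rules above, V4SImode
   and V2DFmode tie with each other (both are Altivec/VSX vector modes), and
   DFmode ties with SFmode (both scalar float), but TImode does not tie with
   PTImode because PTImode only ties with itself.  */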
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  */

static bool
rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
{
  if (TARGET_32BIT
      && TARGET_POWERPC64
      && GET_MODE_SIZE (mode) > 4
      && INT_REGNO_P (regno))
    return true;

  if (TARGET_VSX
      && FP_REGNO_P (regno)
      && GET_MODE_SIZE (mode) > 8
      && !FLOAT128_2REG_P (mode))
    return true;

  return false;
}
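/* Illustrative example (not from this file): with -m32 -mpowerpc64, a DImode
   value (8 bytes, so GET_MODE_SIZE > 4) living in a GPR is only partially
   preserved across calls, because the 32-bit ABI saves just the low 32 bits
   of the nonvolatile GPRs; the hook above reports that case as
   part-clobbered.  */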
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
        fprintf (stderr, "%s:\t", reg_name);
      else
        fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
        if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
          {
            if (len > 70)
              {
                fprintf (stderr, ",\n\t");
                len = 8;
                comma = "";
              }

            if (rs6000_hard_regno_nregs[m][r] > 1)
              len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
                              rs6000_hard_regno_nregs[m][r]);
            else
              len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

            comma = ", ";
          }

      if (call_used_regs[r])
        {
          if (len > 70)
            {
              fprintf (stderr, ",\n\t");
              len = 8;
              comma = "";
            }

          len += fprintf (stderr, "%s%s", comma, "call-used");
          comma = ", ";
        }

      if (fixed_regs[r])
        {
          if (len > 70)
            {
              fprintf (stderr, ",\n\t");
              len = 8;
              comma = "";
            }

          len += fprintf (stderr, "%s%s", comma, "fixed");
          comma = ", ";
        }

      if (len > 70)
        {
          fprintf (stderr, ",\n\t");
          comma = "";
        }

      len += fprintf (stderr, "%sreg-class = %s", comma,
                      reg_class_names[(int)rs6000_regno_regclass[r]]);
      comma = ", ";

      if (len > 70)
        {
          fprintf (stderr, ",\n\t");
          comma = "";
        }

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:      ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:       ret = "vsx";       break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    default:               ret = "unknown";   break;
    }

  return ret;
}
/* Inner function printing just the address mask for a particular reload
   register class.  */
DEBUG_FUNCTION char *
rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
{
  static char ret[8];
  char *p = ret;

  if ((mask & RELOAD_REG_VALID) != 0)
    *p++ = 'v';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_MULTIPLE) != 0)
    *p++ = 'm';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_INDEXED) != 0)
    *p++ = 'i';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
    *p++ = 'O';
  else if ((mask & RELOAD_REG_OFFSET) != 0)
    *p++ = 'o';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_AND_M16) != 0)
    *p++ = '&';
  else if (keep_spaces)
    *p++ = ' ';

  *p = '\0';

  return ret;
}
/* Print the address masks in a human readable fashion.  */
DEBUG_FUNCTION void
rs6000_debug_print_mode (ssize_t m)
{
  ssize_t rc;
  int spaces = 0;

  fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
  for (rc = 0; rc < N_RELOAD_REG; rc++)
    fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
             rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));

  if ((reg_addr[m].reload_store != CODE_FOR_nothing)
      || (reg_addr[m].reload_load != CODE_FOR_nothing))
    {
      fprintf (stderr, "%*s Reload=%c%c", spaces, "",
               (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
               (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
      spaces = 0;
    }
  else
    spaces += sizeof (" Reload=sl") - 1;

  if (reg_addr[m].scalar_in_vmx_p)
    {
      fprintf (stderr, "%*s Upper=y", spaces, "");
      spaces = 0;
    }
  else
    spaces += sizeof (" Upper=y") - 1;

  if (rs6000_vector_unit[m] != VECTOR_NONE
      || rs6000_vector_mem[m] != VECTOR_NONE)
    {
      fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
               spaces, "",
               rs6000_debug_vector_unit (rs6000_vector_unit[m]),
               rs6000_debug_vector_unit (rs6000_vector_mem[m]));
      spaces = 0;
    }

  fputs ("\n", stderr);
}
2408 #define DEBUG_FMT_ID "%-32s= "
2409 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2410 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2411 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
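/* Example of how the format macros above compose (illustrative):

     fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

   expands to a left-justified 32-character key followed by "= " and the
   string value, so the -mdebug=reg listing lines up in two columns.  */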
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  size_t m1, m2, v;
  char costly_num[20];
  char nop_num[20];
  char flags_buffer[40];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;
  struct cl_target_option cl_opts;

  /* Modes we want tieable information on.  */
  static const machine_mode print_tieable_modes[] = {
  };

  /* Virtual regs we are interested in.  */
  const static struct {
    int regno;			/* register number.  */
    const char *name;		/* register name.  */
  } virtual_regs[] = {
    { STACK_POINTER_REGNUM,			"stack pointer:" },
    { TOC_REGNUM,				"toc:          " },
    { STATIC_CHAIN_REGNUM,			"static chain: " },
    { RS6000_PIC_OFFSET_TABLE_REGNUM,		"pic offset:   " },
    { HARD_FRAME_POINTER_REGNUM,		"hard frame:   " },
    { ARG_POINTER_REGNUM,			"arg pointer:  " },
    { FRAME_POINTER_REGNUM,			"frame pointer:" },
    { FIRST_PSEUDO_REGISTER,			"first pseudo: " },
    { FIRST_VIRTUAL_REGISTER,			"first virtual:" },
    { VIRTUAL_INCOMING_ARGS_REGNUM,		"incoming_args:" },
    { VIRTUAL_STACK_VARS_REGNUM,		"stack_vars:   " },
    { VIRTUAL_STACK_DYNAMIC_REGNUM,		"stack_dynamic:" },
    { VIRTUAL_OUTGOING_ARGS_REGNUM,		"outgoing_args:" },
    { VIRTUAL_CFA_REGNUM,			"cfa (frame):  " },
    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM,	"stack boundary:" },
    { LAST_VIRTUAL_REGISTER,			"last virtual: " },
  };

  fputs ("\nHard register information:\n", stderr);
  rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
  rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");

  fputs ("\nVirtual/stack/frame registers:\n", stderr);
  for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
    fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name,
	     virtual_regs[v].regno);

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wb reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "we reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "wg reg_class = %s\n"
	   "wh reg_class = %s\n"
	   "wi reg_class = %s\n"
	   "wj reg_class = %s\n"
	   "wk reg_class = %s\n"
	   "wl reg_class = %s\n"
	   "wm reg_class = %s\n"
	   "wo reg_class = %s\n"
	   "wp reg_class = %s\n"
	   "wq reg_class = %s\n"
	   "wr reg_class = %s\n"
	   "ws reg_class = %s\n"
	   "wt reg_class = %s\n"
	   "wu reg_class = %s\n"
	   "wv reg_class = %s\n"
	   "ww reg_class = %s\n"
	   "wx reg_class = %s\n"
	   "wy reg_class = %s\n"
	   "wz reg_class = %s\n"
	   "wA reg_class = %s\n"
	   "wH reg_class = %s\n"
	   "wI reg_class = %s\n"
	   "wJ reg_class = %s\n"
	   "wK reg_class = %s\n"
	   "\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    rs6000_debug_print_mode (m);

  fputs ("\n", stderr);

  for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
    {
      machine_mode mode1 = print_tieable_modes[m1];
      bool first_time = true;

      nl = (const char *)0;
      for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
	{
	  machine_mode mode2 = print_tieable_modes[m2];
	  if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
	    {
	      if (first_time)
		{
		  fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
		  nl = "\n";
		  first_time = false;
		}

	      fprintf (stderr, " %s", GET_MODE_NAME (mode2));
	    }
	}

      if (!first_time)
	fputs ("\n", stderr);
    }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  {
	    fprintf (stderr,
		     "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		     GET_MODE_NAME (m),
		     (RS6000_RECIP_AUTO_RE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		     (RS6000_RECIP_AUTO_RSQRTE_P (m)
		      ? "auto"
		      : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
	  }

      fputs ("\n", stderr);
    }

  if (rs6000_cpu_index >= 0)
    {
      const char *name = processor_target_table[rs6000_cpu_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_cpu_index].target_enable;

      sprintf (flags_buffer, "-mcpu=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");

  if (rs6000_tune_index >= 0)
    {
      const char *name = processor_target_table[rs6000_tune_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_tune_index].target_enable;

      sprintf (flags_buffer, "-mtune=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");

  cl_target_option_save (&cl_opts, &global_options);
  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
			    rs6000_isa_flags);

  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
			    rs6000_isa_flags_explicit);

  rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
				rs6000_builtin_mask);

  rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
	   OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");

  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }

  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_ELFv2:	abi_str = "ELFv2";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  fprintf (stderr, DEBUG_FMT_S, "soft_float",
	   (TARGET_SOFT_FLOAT ? "true" : "false"));

  if (TARGET_LINK_STACK)
    fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");

  if (TARGET_P8_FUSION)
    {
      char options[80];

      strcpy (options, "power8");
      if (TARGET_P8_FUSION_SIGN)
	strcat (options, ", sign");

      fprintf (stderr, DEBUG_FMT_S, "fusion", options);
    }

  fprintf (stderr, DEBUG_FMT_S, "plt-format",
	   TARGET_SECURE_PLT ? "secure" : "bss");
  fprintf (stderr, DEBUG_FMT_S, "struct-return",
	   aix_struct_return ? "aix" : "sysv");
  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  if (rs6000_long_double_type_size > 64)
    {
      fprintf (stderr, DEBUG_FMT_S, "long double type",
	       TARGET_IEEEQUAD ? "IEEE" : "IBM");
      fprintf (stderr, DEBUG_FMT_S, "default long double type",
	       TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
    }
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);

  fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
	   (int)TARGET_FLOAT128_ENABLE_TYPE);
  if (TARGET_VSX)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
	     (int)VECTOR_ELEMENT_SCALAR_64BIT);

  if (TARGET_DIRECT_MOVE_128)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
	     (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
}
/* Update the addr mask bits in reg_addr to help secondary reload and
   go_if_legitimate_address support figure out the appropriate addressing to
   use.  */

static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      machine_mode m2 = (machine_mode) m;
      bool complex_p = false;
      bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
      size_t msize;

      if (COMPLEX_MODE_P (m2))
	{
	  complex_p = true;
	  m2 = GET_MODE_INNER (m2);
	}

      msize = GET_MODE_SIZE (m2);

      /* SDmode is special in that we want to access it only via REG+REG
	 addressing on power7 and above, since we want to use the LFIWZX and
	 STFIWZX instructions to load it.  */
      bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);

      any_addr_mask = 0;
      for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
	{
	  addr_mask = 0;
	  reg = reload_reg_map[rc].reg;

	  /* Can mode values go in the GPR/FPR/Altivec registers?  */
	  if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
	    {
	      bool small_int_vsx_p = (small_int_p
				      && (rc == RELOAD_REG_FPR
					  || rc == RELOAD_REG_VMX));

	      nregs = rs6000_hard_regno_nregs[m][reg];
	      addr_mask |= RELOAD_REG_VALID;

	      /* Indicate if the mode takes more than 1 physical register.  If
		 it takes a single register, indicate it can do REG+REG
		 addressing.  Small integers in VSX registers can only do
		 REG+REG addressing.  */
	      if (small_int_vsx_p)
		addr_mask |= RELOAD_REG_INDEXED;
	      else if (nregs > 1 || m == BLKmode || complex_p)
		addr_mask |= RELOAD_REG_MULTIPLE;
	      else
		addr_mask |= RELOAD_REG_INDEXED;

	      /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
		 addressing.  If we allow scalars into Altivec registers,
		 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.

		 For VSX systems, we don't allow update addressing for
		 DFmode/SFmode if those registers can go in both the
		 traditional floating point registers and Altivec registers.
		 The load/store instructions for the Altivec registers do not
		 have update forms.  If we allowed update addressing, it seems
		 to break IV-OPT code using floating point if the index type is
		 int instead of long (PR target/81550 and target/84042).  */

	      if (TARGET_UPDATE
		  && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
		  && msize <= 8
		  && !VECTOR_MODE_P (m2)
		  && !FLOAT128_VECTOR_P (m2)
		  && !complex_p
		  && (m != E_DFmode || !TARGET_VSX)
		  && (m != E_SFmode || !TARGET_P8_VECTOR)
		  && !small_int_vsx_p)
		{
		  addr_mask |= RELOAD_REG_PRE_INCDEC;

		  /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
		     we don't allow PRE_MODIFY for some multi-register
		     operations.  */
		  switch (m)
		    {
		    default:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DImode:
		      if (TARGET_POWERPC64)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DFmode:
		    case E_DDmode:
		      if (TARGET_HARD_FLOAT)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;
		    }
		}
	    }

	  /* GPR and FPR registers can do REG+OFFSET addressing, except
	     possibly for SDmode.  ISA 3.0 (i.e. power9) adds D-form addressing
	     for 64-bit scalars and 32-bit SFmode to altivec registers.  */
	  if ((addr_mask != 0) && !indexed_only_p
	      && msize <= 8
	      && (rc == RELOAD_REG_GPR
		  || ((msize == 8 || m2 == SFmode)
		      && (rc == RELOAD_REG_FPR
			  || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
	    addr_mask |= RELOAD_REG_OFFSET;

	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
	     instructions are enabled.  The offset for 128-bit VSX registers is
	     only 12-bits.  While GPRs can handle the full offset range, VSX
	     registers can only handle the restricted range.  */
	  else if ((addr_mask != 0) && !indexed_only_p
		   && msize == 16 && TARGET_P9_VECTOR
		   && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
		       || (m2 == TImode && TARGET_VSX)))
	    {
	      addr_mask |= RELOAD_REG_OFFSET;
	      if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
		addr_mask |= RELOAD_REG_QUAD_OFFSET;
	    }

	  /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
	     addressing on 128-bit types.  */
	  if (rc == RELOAD_REG_VMX && msize == 16
	      && (addr_mask & RELOAD_REG_VALID) != 0)
	    addr_mask |= RELOAD_REG_AND_M16;

	  reg_addr[m].addr_mask[rc] = addr_mask;
	  any_addr_mask |= addr_mask;
	}

      reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
    }
}
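/* Illustrative example (not from this file): for SImode, the GPR reload
   class typically ends up with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET, while the FPR and VMX classes get only
   RELOAD_REG_VALID and RELOAD_REG_INDEXED, because small integers held in
   VSX registers are restricted to REG+REG addressing above.  */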
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  ssize_t r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = NO_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;

  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }

  /* Precalculate the valid memory formats as well as the vector information,
     this must be set up before the rs6000_hard_regno_nregs_internal calls
     below.  */
  gcc_assert ((int)VECTOR_NONE == 0);
  memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
  memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));

  gcc_assert ((int)CODE_FOR_nothing == 0);
  memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));

  gcc_assert ((int)NO_REGS == 0);
  memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));

  /* The VSX hardware allows native alignment for vectors, but
     TARGET_VSX_ALIGN_128 controls whether the compiler believes it can use
     native alignment or must still use 128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* KF mode (IEEE 128-bit in VSX registers).  We do not have arithmetic, so
     only set the memory modes.  Include TFmode if -mabi=ieeelongdouble.  */
  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_vector_mem[KFmode] = VECTOR_VSX;
      rs6000_vector_align[KFmode] = 128;

      if (FLOAT128_IEEE_P (TFmode))
	{
	  rs6000_vector_mem[TFmode] = VECTOR_VSX;
	  rs6000_vector_align[TFmode] = 128;
	}
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, full mode depends on ISA 2.07 vector mode.  Allow under VSX to
     do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;

      rs6000_vector_mem[V1TImode] = VECTOR_VSX;
      rs6000_vector_unit[V1TImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V1TImode] = 128;
    }

  /* DFmode, see if we want to use the VSX unit.  Memory is handled
     differently, so don't set rs6000_vector_mem.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_align[DFmode] = 64;
    }

  /* SFmode, see if we want to use the VSX unit.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_vector_unit[SFmode] = VECTOR_VSX;
      rs6000_vector_align[SFmode] = 32;
    }

  /* Allow TImode in VSX register and set the VSX memory macros.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[TImode] = VECTOR_VSX;
      rs6000_vector_align[TImode] = align64;
    }

  /* Register class constraints for the constraints that depend on compile
     switches.  When the VSX code was added, different constraints were added
     based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
     of the VSX registers are used.  The register classes for scalar floating
     point types are set, based on whether we allow that type into the upper
     (Altivec) registers.  GCC has register classes to target the Altivec
     registers for load/store operations, to select using a VSX memory
     operation instead of the traditional floating point operation.  The
     constraints are:

	d  - Register class to use with traditional DFmode instructions.
	f  - Register class to use with traditional SFmode instructions.
	v  - Altivec register.
	wa - Any VSX register.
	wc - Reserved to represent individual CR bits (used in LLVM).
	wd - Preferred register class for V2DFmode.
	wf - Preferred register class for V4SFmode.
	wg - Float register for power6x move insns.
	wh - FP register for direct move instructions.
	wi - FP or VSX register to hold 64-bit integers for VSX insns.
	wj - FP or VSX register to hold 64-bit integers for direct moves.
	wk - FP or VSX register to hold 64-bit doubles for direct moves.
	wl - Float register if we can do 32-bit signed int loads.
	wm - VSX register for ISA 2.07 direct move operations.
	wn - always NO_REGS.
	wr - GPR if 64-bit mode is permitted.
	ws - Register class to do ISA 2.06 DF operations.
	wt - VSX register for TImode in VSX registers.
	wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
	wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
	ww - Register class to do SF conversions in with VSX operations.
	wx - Float register if we can do 32-bit int stores.
	wy - Register class to do ISA 2.07 SF operations.
	wz - Float register if we can do 32-bit unsigned int loads.
	wH - Altivec register if SImode is allowed in VSX registers.
	wI - VSX register if SImode is allowed in VSX registers.
	wJ - VSX register if QImode/HImode are allowed in VSX registers.
	wK - Altivec register if QImode/HImode are allowed in VSX registers.  */
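  /* Example of how these constraints are used in the machine description
     (an illustrative sketch, not one of the real rs6000.md patterns):

	(define_insn "*example_mov"
	  [(set (match_operand:V2DF 0 "vsx_register_operand" "=wa")
		(match_operand:V2DF 1 "vsx_register_operand" "wa"))]
	  "TARGET_VSX"
	  "xxlor %x0,%x1,%x1")

     Here "wa" asks the register allocator for any VSX register, i.e. the
     class installed in rs6000_constraints[RS6000_CONSTRAINT_wa] below.  */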
  if (TARGET_HARD_FLOAT)
    {
      rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;	/* SFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;	/* DFmode  */
    }

  if (TARGET_VSX)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;	/* V2DFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;	/* V4SFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;	/* DFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;	/* DFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;	/* DImode  */
      rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;	/* TImode  */
    }

  /* Add conditional constraints based on various options, to allow us to
     collapse multiple insn patterns.  */
  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  if (TARGET_MFPGPR)						/* DFmode  */
    rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;

  if (TARGET_LFIWAX)
    rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;	/* DImode  */

  if (TARGET_DIRECT_MOVE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wj]			/* DImode  */
	= rs6000_constraints[RS6000_CONSTRAINT_wi];
      rs6000_constraints[RS6000_CONSTRAINT_wk]			/* DFmode  */
	= rs6000_constraints[RS6000_CONSTRAINT_ws];
      rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
    }

  if (TARGET_POWERPC64)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
    }

  if (TARGET_P8_VECTOR)						/* SFmode  */
    {
      rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
    }
  else if (TARGET_VSX)
    rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;

  if (TARGET_STFIWX)
    rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;	/* DImode  */

  if (TARGET_LFIWZX)
    rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;	/* DImode  */

  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS;	/* KFmode  */
      if (FLOAT128_IEEE_P (TFmode))
	rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS;	/* TFmode  */
    }

  if (TARGET_P9_VECTOR)
    {
      /* Support for new D-form instructions.  */
      rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;

      /* Support for ISA 3.0 (power9) vectors.  */
      rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
    }

  /* Support for new direct moves (ISA 3.0 + 64bit).  */
  if (TARGET_DIRECT_MOVE_128)
    rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;

  /* Support small integers in VSX registers.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
      if (TARGET_P9_VECTOR)
	{
	  rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
	  rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
	}
    }
  /* Set up the reload helper and direct move functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_di_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_di_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_di_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_di_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_di_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_di_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_di_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_di_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_di_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_di_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_di_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_di_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_di_load;
	  reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_di_store;
	  reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_di_load;
	  reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_di_store;
	  reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_di_load;
	  reg_addr[SFmode].reload_store    = CODE_FOR_reload_sf_di_store;
	  reg_addr[SFmode].reload_load     = CODE_FOR_reload_sf_di_load;

	  if (FLOAT128_VECTOR_P (KFmode))
	    {
	      reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
	      reg_addr[KFmode].reload_load  = CODE_FOR_reload_kf_di_load;
	    }

	  if (FLOAT128_VECTOR_P (TFmode))
	    {
	      reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
	      reg_addr[TFmode].reload_load  = CODE_FOR_reload_tf_di_load;
	    }

	  /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
	     available.  */
	  if (TARGET_NO_SDMODE_STACK)
	    {
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_di_load;
	    }

	  if (TARGET_VSX)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_di_load;
	    }

	  if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
	    {
	      reg_addr[TImode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxti;
	      reg_addr[V1TImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv1ti;
	      reg_addr[V2DFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2df;
	      reg_addr[V2DImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2di;
	      reg_addr[V4SFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4sf;
	      reg_addr[V4SImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4si;
	      reg_addr[V8HImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv8hi;
	      reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
	      reg_addr[SFmode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxsf;

	      reg_addr[TImode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprti;
	      reg_addr[V1TImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv1ti;
	      reg_addr[V2DFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2df;
	      reg_addr[V2DImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2di;
	      reg_addr[V4SFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4sf;
	      reg_addr[V4SImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4si;
	      reg_addr[V8HImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv8hi;
	      reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
	      reg_addr[SFmode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprsf;

	      if (FLOAT128_VECTOR_P (KFmode))
		{
		  reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
		  reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
		}

	      if (FLOAT128_VECTOR_P (TFmode))
		{
		  reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
		  reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
		}
	    }
	}
      else
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_si_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_si_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_si_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_si_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_si_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_si_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_si_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_si_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_si_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_si_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_si_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_si_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_si_load;
	  reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_si_store;
	  reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_si_load;
	  reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_si_store;
	  reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_si_load;
	  reg_addr[SFmode].reload_store    = CODE_FOR_reload_sf_si_store;
	  reg_addr[SFmode].reload_load     = CODE_FOR_reload_sf_si_load;

	  if (FLOAT128_VECTOR_P (KFmode))
	    {
	      reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
	      reg_addr[KFmode].reload_load  = CODE_FOR_reload_kf_si_load;
	    }

	  if (FLOAT128_IEEE_P (TFmode))
	    {
	      reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
	      reg_addr[TFmode].reload_load  = CODE_FOR_reload_tf_si_load;
	    }

	  /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
	     available.  */
	  if (TARGET_NO_SDMODE_STACK)
	    {
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_si_load;
	    }

	  if (TARGET_VSX)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_si_load;
	    }

	  if (TARGET_DIRECT_MOVE)
	    {
	      reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
	      reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
	      reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
	    }
	}

      reg_addr[DFmode].scalar_in_vmx_p = true;
      reg_addr[DImode].scalar_in_vmx_p = true;

      if (TARGET_P8_VECTOR)
	{
	  reg_addr[SFmode].scalar_in_vmx_p = true;
	  reg_addr[SImode].scalar_in_vmx_p = true;

	  if (TARGET_P9_VECTOR)
	    {
	      reg_addr[HImode].scalar_in_vmx_p = true;
	      reg_addr[QImode].scalar_in_vmx_p = true;
	    }
	}
    }
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
	= rs6000_hard_regno_nregs_internal (r, (machine_mode)m);

  /* Precalculate TARGET_HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	{
	  machine_mode m2 = (machine_mode)m;
	  int reg_size2 = reg_size;

	  /* TDmode & IBM 128-bit floating point always takes 2 registers, even
	     in VSX registers.  */
	  if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
	    reg_size2 = UNITS_PER_FP_WORD;

	  rs6000_class_max_nregs[m][c]
	    = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
	}
    }
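  /* Worked example (illustrative): IBM 128-bit floating point (double-double)
     is FLOAT128_2REG_P, so even in a VSX register class the divisor above is
     forced back to the 8-byte FP word and the result is
     (16 + 8 - 1) / 8 = 2 registers, whereas IEEE 128-bit KFmode uses the
     16-byte VSX word and needs only (16 + 16 - 1) / 16 = 1.  */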
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double-precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
		 "-ffast-math");
      if (flag_trapping_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip",
		 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
		 "-ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }

  /* Update the addr mask bits in reg_addr to help secondary reload and
     go_if_legitimate_address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost       = %d\n"
		 "SImode constant mult cost       = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost      = %d\n"
		 "SImode division cost            = %d\n"
		 "DImode division cost            = %d\n"
		 "Simple fp operation cost        = %d\n"
		 "DFmode multiplication cost      = %d\n"
		 "SFmode division cost            = %d\n"
		 "DFmode division cost            = %d\n"
		 "cache line size                 = %d\n"
		 "l1 cache size                   = %d\n"
		 "l2 cache size                   = %d\n"
		 "simultaneous prefetches         = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
3618 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3621 darwin_rs6000_override_options (void)
3623 /* The Darwin ABI always includes AltiVec, can't be (validly) turned
3625 rs6000_altivec_abi
= 1;
3626 TARGET_ALTIVEC_VRSAVE
= 1;
3627 rs6000_current_abi
= ABI_DARWIN
;
3629 if (DEFAULT_ABI
== ABI_DARWIN
3631 darwin_one_byte_bool
= 1;
3633 if (TARGET_64BIT
&& ! TARGET_POWERPC64
)
3635 rs6000_isa_flags
|= OPTION_MASK_POWERPC64
;
3636 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3640 rs6000_default_long_calls
= 1;
3641 rs6000_isa_flags
|= OPTION_MASK_SOFT_FLOAT
;
3644 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3646 if (!flag_mkernel
&& !flag_apple_kext
3648 && ! (rs6000_isa_flags_explicit
& OPTION_MASK_ALTIVEC
))
3649 rs6000_isa_flags
|= OPTION_MASK_ALTIVEC
;
3651 /* Unless the user (not the configurer) has explicitly overridden
3652 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3653 G4 unless targeting the kernel. */
3656 && strverscmp (darwin_macosx_version_min
, "10.5") >= 0
3657 && ! (rs6000_isa_flags_explicit
& OPTION_MASK_ALTIVEC
)
3658 && ! global_options_set
.x_rs6000_cpu_index
)
3660 rs6000_isa_flags
|= OPTION_MASK_ALTIVEC
;
3665 /* If not otherwise specified by a target, make 'long double' equivalent to
3668 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3669 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3672 /* Return the builtin mask of the various options used that could affect which
3673 builtins were used. In the past we used target_flags, but we've run out of
3674 bits, and some options are no longer in target_flags. */
3677 rs6000_builtin_mask_calculate (void)
3679 return (((TARGET_ALTIVEC
) ? RS6000_BTM_ALTIVEC
: 0)
3680 | ((TARGET_CMPB
) ? RS6000_BTM_CMPB
: 0)
3681 | ((TARGET_VSX
) ? RS6000_BTM_VSX
: 0)
3682 | ((TARGET_FRE
) ? RS6000_BTM_FRE
: 0)
3683 | ((TARGET_FRES
) ? RS6000_BTM_FRES
: 0)
3684 | ((TARGET_FRSQRTE
) ? RS6000_BTM_FRSQRTE
: 0)
3685 | ((TARGET_FRSQRTES
) ? RS6000_BTM_FRSQRTES
: 0)
3686 | ((TARGET_POPCNTD
) ? RS6000_BTM_POPCNTD
: 0)
3687 | ((rs6000_cpu
== PROCESSOR_CELL
) ? RS6000_BTM_CELL
: 0)
3688 | ((TARGET_P8_VECTOR
) ? RS6000_BTM_P8_VECTOR
: 0)
3689 | ((TARGET_P9_VECTOR
) ? RS6000_BTM_P9_VECTOR
: 0)
3690 | ((TARGET_P9_MISC
) ? RS6000_BTM_P9_MISC
: 0)
3691 | ((TARGET_MODULO
) ? RS6000_BTM_MODULO
: 0)
3692 | ((TARGET_64BIT
) ? RS6000_BTM_64BIT
: 0)
3693 | ((TARGET_POWERPC64
) ? RS6000_BTM_POWERPC64
: 0)
3694 | ((TARGET_CRYPTO
) ? RS6000_BTM_CRYPTO
: 0)
3695 | ((TARGET_HTM
) ? RS6000_BTM_HTM
: 0)
3696 | ((TARGET_DFP
) ? RS6000_BTM_DFP
: 0)
3697 | ((TARGET_HARD_FLOAT
) ? RS6000_BTM_HARD_FLOAT
: 0)
3698 | ((TARGET_LONG_DOUBLE_128
3699 && TARGET_HARD_FLOAT
3700 && !TARGET_IEEEQUAD
) ? RS6000_BTM_LDBL128
: 0)
3701 | ((TARGET_FLOAT128_TYPE
) ? RS6000_BTM_FLOAT128
: 0)
3702 | ((TARGET_FLOAT128_HW
) ? RS6000_BTM_FLOAT128_HW
: 0));
3705 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3706 to clobber the XER[CA] bit because clobbering that bit without telling
3707 the compiler worked just fine with versions of GCC before GCC 5, and
3708 breaking a lot of older code in ways that are hard to track down is
3709 not such a great idea. */
3712 rs6000_md_asm_adjust (vec
<rtx
> &/*outputs*/, vec
<rtx
> &/*inputs*/,
3713 vec
<const char *> &/*constraints*/,
3714 vec
<rtx
> &clobbers
, HARD_REG_SET
&clobbered_regs
)
3716 clobbers
.safe_push (gen_rtx_REG (SImode
, CA_REGNO
));
3717 SET_HARD_REG_BIT (clobbered_regs
, CA_REGNO
);
3721 /* Override command line options.
3723 Combine build-specific configuration information with options
3724 specified on the command line to set various state variables which
3725 influence code generation, optimization, and expansion of built-in
3726 functions. Assure that command-line configuration preferences are
3727 compatible with each other and with the build configuration; issue
3728 warnings while adjusting configuration or error messages while
3729 rejecting configuration.
3731 Upon entry to this function:
3733 This function is called once at the beginning of
3734 compilation, and then again at the start and end of compiling
3735 each section of code that has a different configuration, as
3736 indicated, for example, by adding the
3738 __attribute__((__target__("cpu=power9")))
3740 qualifier to a function definition or, for example, by bracketing
3743 #pragma GCC target("altivec")
3747 #pragma GCC reset_options
3749 directives. Parameter global_init_p is true for the initial
3750 invocation, which initializes global variables, and false for all
3751 subsequent invocations.
3754 Various global state information is assumed to be valid. This
3755 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3756 default CPU specified at build configure time, TARGET_DEFAULT,
3757 representing the default set of option flags for the default
3758 target, and global_options_set.x_rs6000_isa_flags, representing
3759 which options were requested on the command line.
3761 Upon return from this function:
3763 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3764 was set by name on the command line. Additionally, if certain
3765 attributes are automatically enabled or disabled by this function
3766 in order to assure compatibility between options and
3767 configuration, the flags associated with those attributes are
3768 also set. By setting these "explicit bits", we avoid the risk
3769 that other code might accidentally overwrite these particular
3770 attributes with "default values".
3772 The various bits of rs6000_isa_flags are set to indicate the
3773 target options that have been selected for the most current
3774 compilation efforts. This has the effect of also turning on the
3775 associated TARGET_XXX values since these are macros which are
3776 generally defined to test the corresponding bit of the
3777 rs6000_isa_flags variable.
3779 The variable rs6000_builtin_mask is set to represent the target
3780 options for the most current compilation efforts, consistent with
3781 the current contents of rs6000_isa_flags. This variable controls
3782 expansion of built-in functions.
3784 Various other global variables and fields of global structures
3785 (over 50 in all) are initialized to reflect the desired options
3786 for the most current compilation efforts. */
3789 rs6000_option_override_internal (bool global_init_p
)
3793 HOST_WIDE_INT set_masks
;
3794 HOST_WIDE_INT ignore_masks
;
3797 struct cl_target_option
*main_target_opt
3798 = ((global_init_p
|| target_option_default_node
== NULL
)
3799 ? NULL
: TREE_TARGET_OPTION (target_option_default_node
));
3801 /* Print defaults. */
3802 if ((TARGET_DEBUG_REG
|| TARGET_DEBUG_TARGET
) && global_init_p
)
3803 rs6000_print_isa_options (stderr
, 0, "TARGET_DEFAULT", TARGET_DEFAULT
);
3805 /* Remember the explicit arguments. */
3807 rs6000_isa_flags_explicit
= global_options_set
.x_rs6000_isa_flags
;
3809 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3810 library functions, so warn about it. The flag may be useful for
3811 performance studies from time to time though, so don't disable it
3813 if (global_options_set
.x_rs6000_alignment_flags
3814 && rs6000_alignment_flags
== MASK_ALIGN_POWER
3815 && DEFAULT_ABI
== ABI_DARWIN
3817 warning (0, "%qs is not supported for 64-bit Darwin;"
3818 " it is incompatible with the installed C and C++ libraries",
3821 /* Numerous experiment shows that IRA based loop pressure
3822 calculation works better for RTL loop invariant motion on targets
3823 with enough (>= 32) registers. It is an expensive optimization.
3824 So it is on only for peak performance. */
3825 if (optimize
>= 3 && global_init_p
3826 && !global_options_set
.x_flag_ira_loop_pressure
)
3827 flag_ira_loop_pressure
= 1;
3829 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3830 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3831 options were already specified. */
3832 if (flag_sanitize
& SANITIZE_USER_ADDRESS
3833 && !global_options_set
.x_flag_asynchronous_unwind_tables
)
3834 flag_asynchronous_unwind_tables
= 1;
3836 /* Set the pointer size. */
3839 rs6000_pmode
= DImode
;
3840 rs6000_pointer_size
= 64;
3844 rs6000_pmode
= SImode
;
3845 rs6000_pointer_size
= 32;
3848 /* Some OSs don't support saving the high part of 64-bit registers on context
3849 switch. Other OSs don't support saving Altivec registers. On those OSs,
3850 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3851 if the user wants either, the user must explicitly specify them and we
3852 won't interfere with the user's specification. */
3854 set_masks
= POWERPC_MASKS
;
3855 #ifdef OS_MISSING_POWERPC64
3856 if (OS_MISSING_POWERPC64
)
3857 set_masks
&= ~OPTION_MASK_POWERPC64
;
3859 #ifdef OS_MISSING_ALTIVEC
3860 if (OS_MISSING_ALTIVEC
)
3861 set_masks
&= ~(OPTION_MASK_ALTIVEC
| OPTION_MASK_VSX
3862 | OTHER_VSX_VECTOR_MASKS
);
3865 /* Don't override by the processor default if given explicitly. */
3866 set_masks
&= ~rs6000_isa_flags_explicit
;
3868 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3869 the cpu in a target attribute or pragma, but did not specify a tuning
3870 option, use the cpu for the tuning option rather than the option specified
3871 with -mtune on the command line. Process a '--with-cpu' configuration
3872 request as an implicit --cpu. */
3873 if (rs6000_cpu_index
>= 0)
3874 cpu_index
= rs6000_cpu_index
;
3875 else if (main_target_opt
!= NULL
&& main_target_opt
->x_rs6000_cpu_index
>= 0)
3876 cpu_index
= main_target_opt
->x_rs6000_cpu_index
;
3877 else if (OPTION_TARGET_CPU_DEFAULT
)
3878 cpu_index
= rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT
);
3880 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3881 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3882 with those from the cpu, except for options that were explicitly set. If
3883 we don't have a cpu, do not override the target bits set in
3887 rs6000_cpu_index
= cpu_index
;
3888 rs6000_isa_flags
&= ~set_masks
;
3889 rs6000_isa_flags
|= (processor_target_table
[cpu_index
].target_enable
3894 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3895 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3896 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3897 to using rs6000_isa_flags, we need to do the initialization here.
3899 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3900 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3901 HOST_WIDE_INT flags
;
3903 flags
= TARGET_DEFAULT
;
3906 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3907 const char *default_cpu
= (!TARGET_POWERPC64
3912 int default_cpu_index
= rs6000_cpu_name_lookup (default_cpu
);
3913 flags
= processor_target_table
[default_cpu_index
].target_enable
;
3915 rs6000_isa_flags
|= (flags
& ~rs6000_isa_flags_explicit
);
3918 if (rs6000_tune_index
>= 0)
3919 tune_index
= rs6000_tune_index
;
3920 else if (cpu_index
>= 0)
3921 rs6000_tune_index
= tune_index
= cpu_index
;
3925 enum processor_type tune_proc
3926 = (TARGET_POWERPC64
? PROCESSOR_DEFAULT64
: PROCESSOR_DEFAULT
);
3929 for (i
= 0; i
< ARRAY_SIZE (processor_target_table
); i
++)
3930 if (processor_target_table
[i
].processor
== tune_proc
)
3938 rs6000_cpu
= processor_target_table
[cpu_index
].processor
;
3940 rs6000_cpu
= TARGET_POWERPC64
? PROCESSOR_DEFAULT64
: PROCESSOR_DEFAULT
;
3942 gcc_assert (tune_index
>= 0);
3943 rs6000_tune
= processor_target_table
[tune_index
].processor
;
3945 if (rs6000_cpu
== PROCESSOR_PPCE300C2
|| rs6000_cpu
== PROCESSOR_PPCE300C3
3946 || rs6000_cpu
== PROCESSOR_PPCE500MC
|| rs6000_cpu
== PROCESSOR_PPCE500MC64
3947 || rs6000_cpu
== PROCESSOR_PPCE5500
)
3950 error ("AltiVec not supported in this target");
3953 /* If we are optimizing big endian systems for space, use the load/store
3954 multiple instructions. */
3955 if (BYTES_BIG_ENDIAN
&& optimize_size
)
3956 rs6000_isa_flags
|= ~rs6000_isa_flags_explicit
& OPTION_MASK_MULTIPLE
;
3958 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3959 because the hardware doesn't support the instructions used in little
3960 endian mode, and causes an alignment trap. The 750 does not cause an
3961 alignment trap (except when the target is unaligned). */
3963 if (!BYTES_BIG_ENDIAN
&& rs6000_cpu
!= PROCESSOR_PPC750
&& TARGET_MULTIPLE
)
3965 rs6000_isa_flags
&= ~OPTION_MASK_MULTIPLE
;
3966 if ((rs6000_isa_flags_explicit
& OPTION_MASK_MULTIPLE
) != 0)
3967 warning (0, "%qs is not supported on little endian systems",
3971 /* If little-endian, default to -mstrict-align on older processors.
3972 Testing for htm matches power8 and later. */
3973 if (!BYTES_BIG_ENDIAN
3974 && !(processor_target_table
[tune_index
].target_enable
& OPTION_MASK_HTM
))
3975 rs6000_isa_flags
|= ~rs6000_isa_flags_explicit
& OPTION_MASK_STRICT_ALIGN
;
3977 if (!rs6000_fold_gimple
)
3979 "gimple folding of rs6000 builtins has been disabled.\n");
3981 /* Add some warnings for VSX. */
3984 const char *msg
= NULL
;
3985 if (!TARGET_HARD_FLOAT
)
3987 if (rs6000_isa_flags_explicit
& OPTION_MASK_VSX
)
3988 msg
= N_("-mvsx requires hardware floating point");
3991 rs6000_isa_flags
&= ~ OPTION_MASK_VSX
;
3992 rs6000_isa_flags_explicit
|= OPTION_MASK_VSX
;
3995 else if (TARGET_AVOID_XFORM
> 0)
3996 msg
= N_("-mvsx needs indexed addressing");
3997 else if (!TARGET_ALTIVEC
&& (rs6000_isa_flags_explicit
3998 & OPTION_MASK_ALTIVEC
))
4000 if (rs6000_isa_flags_explicit
& OPTION_MASK_VSX
)
4001 msg
= N_("-mvsx and -mno-altivec are incompatible");
4003 msg
= N_("-mno-altivec disables vsx");
4009 rs6000_isa_flags
&= ~ OPTION_MASK_VSX
;
4010 rs6000_isa_flags_explicit
|= OPTION_MASK_VSX
;
4014 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4015 the -mcpu setting to enable options that conflict. */
4016 if ((!TARGET_HARD_FLOAT
|| !TARGET_ALTIVEC
|| !TARGET_VSX
)
4017 && (rs6000_isa_flags_explicit
& (OPTION_MASK_SOFT_FLOAT
4018 | OPTION_MASK_ALTIVEC
4019 | OPTION_MASK_VSX
)) != 0)
4020 rs6000_isa_flags
&= ~((OPTION_MASK_P8_VECTOR
| OPTION_MASK_CRYPTO
4021 | OPTION_MASK_DIRECT_MOVE
)
4022 & ~rs6000_isa_flags_explicit
);
4024 if (TARGET_DEBUG_REG
|| TARGET_DEBUG_TARGET
)
4025 rs6000_print_isa_options (stderr
, 0, "before defaults", rs6000_isa_flags
);
4027 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4028 off all of the options that depend on those flags. */
4029 ignore_masks
= rs6000_disable_incompatible_switches ();
4031 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4032 unless the user explicitly used the -mno-<option> to disable the code. */
4033 if (TARGET_P9_VECTOR
|| TARGET_MODULO
|| TARGET_P9_MISC
)
4034 rs6000_isa_flags
|= (ISA_3_0_MASKS_SERVER
& ~ignore_masks
);
4035 else if (TARGET_P9_MINMAX
)
4039 if (cpu_index
== PROCESSOR_POWER9
)
4041 /* legacy behavior: allow -mcpu=power9 with certain
4042 capabilities explicitly disabled. */
4043 rs6000_isa_flags
|= (ISA_3_0_MASKS_SERVER
& ~ignore_masks
);
4046 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4047 "for <xxx> less than power9", "-mcpu");
4049 else if ((ISA_3_0_MASKS_SERVER
& rs6000_isa_flags_explicit
)
4050 != (ISA_3_0_MASKS_SERVER
& rs6000_isa_flags
4051 & rs6000_isa_flags_explicit
))
4052 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4053 were explicitly cleared. */
4054 error ("%qs incompatible with explicitly disabled options",
4057 rs6000_isa_flags
|= ISA_3_0_MASKS_SERVER
;
4059 else if (TARGET_P8_VECTOR
|| TARGET_DIRECT_MOVE
|| TARGET_CRYPTO
)
4060 rs6000_isa_flags
|= (ISA_2_7_MASKS_SERVER
& ~ignore_masks
);
4061 else if (TARGET_VSX
)
4062 rs6000_isa_flags
|= (ISA_2_6_MASKS_SERVER
& ~ignore_masks
);
4063 else if (TARGET_POPCNTD
)
4064 rs6000_isa_flags
|= (ISA_2_6_MASKS_EMBEDDED
& ~ignore_masks
);
4065 else if (TARGET_DFP
)
4066 rs6000_isa_flags
|= (ISA_2_5_MASKS_SERVER
& ~ignore_masks
);
4067 else if (TARGET_CMPB
)
4068 rs6000_isa_flags
|= (ISA_2_5_MASKS_EMBEDDED
& ~ignore_masks
);
4069 else if (TARGET_FPRND
)
4070 rs6000_isa_flags
|= (ISA_2_4_MASKS
& ~ignore_masks
);
4071 else if (TARGET_POPCNTB
)
4072 rs6000_isa_flags
|= (ISA_2_2_MASKS
& ~ignore_masks
);
4073 else if (TARGET_ALTIVEC
)
4074 rs6000_isa_flags
|= (OPTION_MASK_PPC_GFXOPT
& ~ignore_masks
);
4076 if (TARGET_CRYPTO
&& !TARGET_ALTIVEC
)
4078 if (rs6000_isa_flags_explicit
& OPTION_MASK_CRYPTO
)
4079 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4080 rs6000_isa_flags
&= ~OPTION_MASK_CRYPTO
;
4083 if (TARGET_DIRECT_MOVE
&& !TARGET_VSX
)
4085 if (rs6000_isa_flags_explicit
& OPTION_MASK_DIRECT_MOVE
)
4086 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4087 rs6000_isa_flags
&= ~OPTION_MASK_DIRECT_MOVE
;
4090 if (TARGET_P8_VECTOR
&& !TARGET_ALTIVEC
)
4092 if (rs6000_isa_flags_explicit
& OPTION_MASK_P8_VECTOR
)
4093 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4094 rs6000_isa_flags
&= ~OPTION_MASK_P8_VECTOR
;
4097 if (TARGET_P8_VECTOR
&& !TARGET_VSX
)
4099 if ((rs6000_isa_flags_explicit
& OPTION_MASK_P8_VECTOR
)
4100 && (rs6000_isa_flags_explicit
& OPTION_MASK_VSX
))
4101 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4102 else if ((rs6000_isa_flags_explicit
& OPTION_MASK_P8_VECTOR
) == 0)
4104 rs6000_isa_flags
&= ~OPTION_MASK_P8_VECTOR
;
4105 if (rs6000_isa_flags_explicit
& OPTION_MASK_VSX
)
4106 rs6000_isa_flags_explicit
|= OPTION_MASK_P8_VECTOR
;
4110 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4112 rs6000_isa_flags
|= OPTION_MASK_VSX
;
4113 rs6000_isa_flags_explicit
|= OPTION_MASK_VSX
;
4117 if (TARGET_DFP
&& !TARGET_HARD_FLOAT
)
4119 if (rs6000_isa_flags_explicit
& OPTION_MASK_DFP
)
4120 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4121 rs6000_isa_flags
&= ~OPTION_MASK_DFP
;
4124 /* The quad memory instructions only works in 64-bit mode. In 32-bit mode,
4125 silently turn off quad memory mode. */
4126 if ((TARGET_QUAD_MEMORY
|| TARGET_QUAD_MEMORY_ATOMIC
) && !TARGET_POWERPC64
)
4128 if ((rs6000_isa_flags_explicit
& OPTION_MASK_QUAD_MEMORY
) != 0)
4129 warning (0, N_("-mquad-memory requires 64-bit mode"));
4131 if ((rs6000_isa_flags_explicit
& OPTION_MASK_QUAD_MEMORY_ATOMIC
) != 0)
4132 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4134 rs6000_isa_flags
&= ~(OPTION_MASK_QUAD_MEMORY
4135 | OPTION_MASK_QUAD_MEMORY_ATOMIC
);
4138 /* Non-atomic quad memory load/store are disabled for little endian, since
4139 the words are reversed, but atomic operations can still be done by
4140 swapping the words. */
4141 if (TARGET_QUAD_MEMORY
&& !WORDS_BIG_ENDIAN
)
4143 if ((rs6000_isa_flags_explicit
& OPTION_MASK_QUAD_MEMORY
) != 0)
4144 warning (0, N_("-mquad-memory is not available in little endian "
4147 rs6000_isa_flags
&= ~OPTION_MASK_QUAD_MEMORY
;
4150 /* Assume if the user asked for normal quad memory instructions, they want
4151 the atomic versions as well, unless they explicity told us not to use quad
4152 word atomic instructions. */
4153 if (TARGET_QUAD_MEMORY
4154 && !TARGET_QUAD_MEMORY_ATOMIC
4155 && ((rs6000_isa_flags_explicit
& OPTION_MASK_QUAD_MEMORY_ATOMIC
) == 0))
4156 rs6000_isa_flags
|= OPTION_MASK_QUAD_MEMORY_ATOMIC
;
4158 /* If we can shrink-wrap the TOC register save separately, then use
4159 -msave-toc-indirect unless explicitly disabled. */
4160 if ((rs6000_isa_flags_explicit
& OPTION_MASK_SAVE_TOC_INDIRECT
) == 0
4161 && flag_shrink_wrap_separate
4162 && optimize_function_for_speed_p (cfun
))
4163 rs6000_isa_flags
|= OPTION_MASK_SAVE_TOC_INDIRECT
;
4165 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4166 generating power8 instructions. Power9 does not optimize power8 fusion
4168 if (!(rs6000_isa_flags_explicit
& OPTION_MASK_P8_FUSION
))
4170 if (processor_target_table
[tune_index
].processor
== PROCESSOR_POWER8
)
4171 rs6000_isa_flags
|= OPTION_MASK_P8_FUSION
;
4173 rs6000_isa_flags
&= ~OPTION_MASK_P8_FUSION
;
4176 /* Setting additional fusion flags turns on base fusion. */
4177 if (!TARGET_P8_FUSION
&& TARGET_P8_FUSION_SIGN
)
4179 if (rs6000_isa_flags_explicit
& OPTION_MASK_P8_FUSION
)
4181 if (TARGET_P8_FUSION_SIGN
)
4182 error ("%qs requires %qs", "-mpower8-fusion-sign",
4185 rs6000_isa_flags
&= ~OPTION_MASK_P8_FUSION
;
4188 rs6000_isa_flags
|= OPTION_MASK_P8_FUSION
;
4191 /* Power8 does not fuse sign extended loads with the addis. If we are
4192 optimizing at high levels for speed, convert a sign extended load into a
4193 zero extending load, and an explicit sign extension. */
4194 if (TARGET_P8_FUSION
4195 && !(rs6000_isa_flags_explicit
& OPTION_MASK_P8_FUSION_SIGN
)
4196 && optimize_function_for_speed_p (cfun
)
4198 rs6000_isa_flags
|= OPTION_MASK_P8_FUSION_SIGN
;
4200 /* ISA 3.0 vector instructions include ISA 2.07. */
4201 if (TARGET_P9_VECTOR
&& !TARGET_P8_VECTOR
)
4203 /* We prefer to not mention undocumented options in
4204 error messages. However, if users have managed to select
4205 power9-vector without selecting power8-vector, they
4206 already know about undocumented flags. */
4207 if ((rs6000_isa_flags_explicit
& OPTION_MASK_P9_VECTOR
) &&
4208 (rs6000_isa_flags_explicit
& OPTION_MASK_P8_VECTOR
))
4209 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4210 else if ((rs6000_isa_flags_explicit
& OPTION_MASK_P9_VECTOR
) == 0)
4212 rs6000_isa_flags
&= ~OPTION_MASK_P9_VECTOR
;
4213 if (rs6000_isa_flags_explicit
& OPTION_MASK_P8_VECTOR
)
4214 rs6000_isa_flags_explicit
|= OPTION_MASK_P9_VECTOR
;
4218 /* OPTION_MASK_P9_VECTOR is explicit and
4219 OPTION_MASK_P8_VECTOR is not explicit. */
4220 rs6000_isa_flags
|= OPTION_MASK_P8_VECTOR
;
4221 rs6000_isa_flags_explicit
|= OPTION_MASK_P8_VECTOR
;
4225 /* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
4226 support. If we only have ISA 2.06 support, and the user did not specify
4227 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4228 but we don't enable the full vectorization support */
4229 if (TARGET_ALLOW_MOVMISALIGN
== -1 && TARGET_P8_VECTOR
&& TARGET_DIRECT_MOVE
)
4230 TARGET_ALLOW_MOVMISALIGN
= 1;
4232 else if (TARGET_ALLOW_MOVMISALIGN
&& !TARGET_VSX
)
4234 if (TARGET_ALLOW_MOVMISALIGN
> 0
4235 && global_options_set
.x_TARGET_ALLOW_MOVMISALIGN
)
4236 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4238 TARGET_ALLOW_MOVMISALIGN
= 0;
4241 /* Determine when unaligned vector accesses are permitted, and when
4242 they are preferred over masked Altivec loads. Note that if
4243 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4244 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4246 if (TARGET_EFFICIENT_UNALIGNED_VSX
)
4250 if (rs6000_isa_flags_explicit
& OPTION_MASK_EFFICIENT_UNALIGNED_VSX
)
4251 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4253 rs6000_isa_flags
&= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX
;
4256 else if (!TARGET_ALLOW_MOVMISALIGN
)
4258 if (rs6000_isa_flags_explicit
& OPTION_MASK_EFFICIENT_UNALIGNED_VSX
)
4259 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4260 "-mallow-movmisalign");
4262 rs6000_isa_flags
&= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX
;
4266 /* Use long double size to select the appropriate long double. We use
4267 TYPE_PRECISION to differentiate the 3 different long double types. We map
4268 128 into the precision used for TFmode. */
4269 int default_long_double_size
= (RS6000_DEFAULT_LONG_DOUBLE_SIZE
== 64
4271 : FLOAT_PRECISION_TFmode
);
4273 /* Set long double size before the IEEE 128-bit tests. */
4274 if (!global_options_set
.x_rs6000_long_double_type_size
)
4276 if (main_target_opt
!= NULL
4277 && (main_target_opt
->x_rs6000_long_double_type_size
4278 != default_long_double_size
))
4279 error ("target attribute or pragma changes long double size");
4281 rs6000_long_double_type_size
= default_long_double_size
;
4283 else if (rs6000_long_double_type_size
== 128)
4284 rs6000_long_double_type_size
= FLOAT_PRECISION_TFmode
;
4286 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4287 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4288 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4289 those systems will not pick up this default. Warn if the user changes the
4290 default unless -Wno-psabi. */
4291 if (!global_options_set
.x_rs6000_ieeequad
)
4292 rs6000_ieeequad
= TARGET_IEEEQUAD_DEFAULT
;
4294 else if (rs6000_ieeequad
!= TARGET_IEEEQUAD_DEFAULT
&& TARGET_LONG_DOUBLE_128
)
4296 static bool warned_change_long_double
;
4297 if (!warned_change_long_double
)
4299 warned_change_long_double
= true;
4300 if (TARGET_IEEEQUAD
)
4301 warning (OPT_Wpsabi
, "Using IEEE extended precision long double");
4303 warning (OPT_Wpsabi
, "Using IBM extended precision long double");
4307 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4308 sytems. In GCC 7, we would enable the the IEEE 128-bit floating point
4309 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4310 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4311 the keyword as well as the type. */
4312 TARGET_FLOAT128_TYPE
= TARGET_FLOAT128_ENABLE_TYPE
&& TARGET_VSX
;
4314 /* IEEE 128-bit floating point requires VSX support. */
4315 if (TARGET_FLOAT128_KEYWORD
)
4319 if ((rs6000_isa_flags_explicit
& OPTION_MASK_FLOAT128_KEYWORD
) != 0)
4320 error ("%qs requires VSX support", "-mfloat128");
4322 TARGET_FLOAT128_TYPE
= 0;
4323 rs6000_isa_flags
&= ~(OPTION_MASK_FLOAT128_KEYWORD
4324 | OPTION_MASK_FLOAT128_HW
);
4326 else if (!TARGET_FLOAT128_TYPE
)
4328 TARGET_FLOAT128_TYPE
= 1;
4329 warning (0, "The -mfloat128 option may not be fully supported");
4333 /* Enable the __float128 keyword under Linux by default. */
4334 if (TARGET_FLOAT128_TYPE
&& !TARGET_FLOAT128_KEYWORD
4335 && (rs6000_isa_flags_explicit
& OPTION_MASK_FLOAT128_KEYWORD
) == 0)
4336 rs6000_isa_flags
|= OPTION_MASK_FLOAT128_KEYWORD
;
4338 /* If we have are supporting the float128 type and full ISA 3.0 support,
4339 enable -mfloat128-hardware by default. However, don't enable the
4340 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4341 because sometimes the compiler wants to put things in an integer
4342 container, and if we don't have __int128 support, it is impossible. */
4343 if (TARGET_FLOAT128_TYPE
&& !TARGET_FLOAT128_HW
&& TARGET_64BIT
4344 && (rs6000_isa_flags
& ISA_3_0_MASKS_IEEE
) == ISA_3_0_MASKS_IEEE
4345 && !(rs6000_isa_flags_explicit
& OPTION_MASK_FLOAT128_HW
))
4346 rs6000_isa_flags
|= OPTION_MASK_FLOAT128_HW
;
4348 if (TARGET_FLOAT128_HW
4349 && (rs6000_isa_flags
& ISA_3_0_MASKS_IEEE
) != ISA_3_0_MASKS_IEEE
)
4351 if ((rs6000_isa_flags_explicit
& OPTION_MASK_FLOAT128_HW
) != 0)
4352 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4354 rs6000_isa_flags
&= ~OPTION_MASK_FLOAT128_HW
;
4357 if (TARGET_FLOAT128_HW
&& !TARGET_64BIT
)
4359 if ((rs6000_isa_flags_explicit
& OPTION_MASK_FLOAT128_HW
) != 0)
4360 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4362 rs6000_isa_flags
&= ~OPTION_MASK_FLOAT128_HW
;
4365 /* Print the options after updating the defaults. */
4366 if (TARGET_DEBUG_REG
|| TARGET_DEBUG_TARGET
)
4367 rs6000_print_isa_options (stderr
, 0, "after defaults", rs6000_isa_flags
);
4369 /* E500mc does "better" if we inline more aggressively. Respect the
4370 user's opinion, though. */
4371 if (rs6000_block_move_inline_limit
== 0
4372 && (rs6000_tune
== PROCESSOR_PPCE500MC
4373 || rs6000_tune
== PROCESSOR_PPCE500MC64
4374 || rs6000_tune
== PROCESSOR_PPCE5500
4375 || rs6000_tune
== PROCESSOR_PPCE6500
))
4376 rs6000_block_move_inline_limit
= 128;
4378 /* store_one_arg depends on expand_block_move to handle at least the
4379 size of reg_parm_stack_space. */
4380 if (rs6000_block_move_inline_limit
< (TARGET_POWERPC64
? 64 : 32))
4381 rs6000_block_move_inline_limit
= (TARGET_POWERPC64
? 64 : 32);
4385 /* If the appropriate debug option is enabled, replace the target hooks
4386 with debug versions that call the real version and then prints
4387 debugging information. */
4388 if (TARGET_DEBUG_COST
)
4390 targetm
.rtx_costs
= rs6000_debug_rtx_costs
;
4391 targetm
.address_cost
= rs6000_debug_address_cost
;
4392 targetm
.sched
.adjust_cost
= rs6000_debug_adjust_cost
;
4395 if (TARGET_DEBUG_ADDR
)
4397 targetm
.legitimate_address_p
= rs6000_debug_legitimate_address_p
;
4398 targetm
.legitimize_address
= rs6000_debug_legitimize_address
;
4399 rs6000_secondary_reload_class_ptr
4400 = rs6000_debug_secondary_reload_class
;
4401 targetm
.secondary_memory_needed
4402 = rs6000_debug_secondary_memory_needed
;
4403 targetm
.can_change_mode_class
4404 = rs6000_debug_can_change_mode_class
;
4405 rs6000_preferred_reload_class_ptr
4406 = rs6000_debug_preferred_reload_class
;
4407 rs6000_legitimize_reload_address_ptr
4408 = rs6000_debug_legitimize_reload_address
;
4409 rs6000_mode_dependent_address_ptr
4410 = rs6000_debug_mode_dependent_address
;
4413 if (rs6000_veclibabi_name
)
4415 if (strcmp (rs6000_veclibabi_name
, "mass") == 0)
4416 rs6000_veclib_handler
= rs6000_builtin_vectorized_libmass
;
4419 error ("unknown vectorization library ABI type (%qs) for "
4420 "%qs switch", rs6000_veclibabi_name
, "-mveclibabi=");
4426 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4427 target attribute or pragma which automatically enables both options,
4428 unless the altivec ABI was set. This is set by default for 64-bit, but
4430 if (main_target_opt
!= NULL
&& !main_target_opt
->x_rs6000_altivec_abi
)
4432 TARGET_FLOAT128_TYPE
= 0;
4433 rs6000_isa_flags
&= ~((OPTION_MASK_VSX
| OPTION_MASK_ALTIVEC
4434 | OPTION_MASK_FLOAT128_KEYWORD
)
4435 & ~rs6000_isa_flags_explicit
);
4438 /* Enable Altivec ABI for AIX -maltivec. */
4439 if (TARGET_XCOFF
&& (TARGET_ALTIVEC
|| TARGET_VSX
))
4441 if (main_target_opt
!= NULL
&& !main_target_opt
->x_rs6000_altivec_abi
)
4442 error ("target attribute or pragma changes AltiVec ABI");
4444 rs6000_altivec_abi
= 1;
4447 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4448 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4449 be explicitly overridden in either case. */
4452 if (!global_options_set
.x_rs6000_altivec_abi
4453 && (TARGET_64BIT
|| TARGET_ALTIVEC
|| TARGET_VSX
))
4455 if (main_target_opt
!= NULL
&&
4456 !main_target_opt
->x_rs6000_altivec_abi
)
4457 error ("target attribute or pragma changes AltiVec ABI");
4459 rs6000_altivec_abi
= 1;
4463 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4464 So far, the only darwin64 targets are also MACH-O. */
4466 && DEFAULT_ABI
== ABI_DARWIN
4469 if (main_target_opt
!= NULL
&& !main_target_opt
->x_rs6000_darwin64_abi
)
4470 error ("target attribute or pragma changes darwin64 ABI");
4473 rs6000_darwin64_abi
= 1;
4474 /* Default to natural alignment, for better performance. */
4475 rs6000_alignment_flags
= MASK_ALIGN_NATURAL
;
4479 /* Place FP constants in the constant pool instead of TOC
4480 if section anchors enabled. */
4481 if (flag_section_anchors
4482 && !global_options_set
.x_TARGET_NO_FP_IN_TOC
)
4483 TARGET_NO_FP_IN_TOC
= 1;
4485 if (TARGET_DEBUG_REG
|| TARGET_DEBUG_TARGET
)
4486 rs6000_print_isa_options (stderr
, 0, "before subtarget", rs6000_isa_flags
);
4488 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4489 SUBTARGET_OVERRIDE_OPTIONS
;
4491 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4492 SUBSUBTARGET_OVERRIDE_OPTIONS
;
4494 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4495 SUB3TARGET_OVERRIDE_OPTIONS
;
4498 if (TARGET_DEBUG_REG
|| TARGET_DEBUG_TARGET
)
4499 rs6000_print_isa_options (stderr
, 0, "after subtarget", rs6000_isa_flags
);
4501 rs6000_always_hint
= (rs6000_tune
!= PROCESSOR_POWER4
4502 && rs6000_tune
!= PROCESSOR_POWER5
4503 && rs6000_tune
!= PROCESSOR_POWER6
4504 && rs6000_tune
!= PROCESSOR_POWER7
4505 && rs6000_tune
!= PROCESSOR_POWER8
4506 && rs6000_tune
!= PROCESSOR_POWER9
4507 && rs6000_tune
!= PROCESSOR_PPCA2
4508 && rs6000_tune
!= PROCESSOR_CELL
4509 && rs6000_tune
!= PROCESSOR_PPC476
);
4510 rs6000_sched_groups
= (rs6000_tune
== PROCESSOR_POWER4
4511 || rs6000_tune
== PROCESSOR_POWER5
4512 || rs6000_tune
== PROCESSOR_POWER7
4513 || rs6000_tune
== PROCESSOR_POWER8
);
4514 rs6000_align_branch_targets
= (rs6000_tune
== PROCESSOR_POWER4
4515 || rs6000_tune
== PROCESSOR_POWER5
4516 || rs6000_tune
== PROCESSOR_POWER6
4517 || rs6000_tune
== PROCESSOR_POWER7
4518 || rs6000_tune
== PROCESSOR_POWER8
4519 || rs6000_tune
== PROCESSOR_POWER9
4520 || rs6000_tune
== PROCESSOR_PPCE500MC
4521 || rs6000_tune
== PROCESSOR_PPCE500MC64
4522 || rs6000_tune
== PROCESSOR_PPCE5500
4523 || rs6000_tune
== PROCESSOR_PPCE6500
);
4525 /* Allow debug switches to override the above settings. These are set to -1
4526 in rs6000.opt to indicate the user hasn't directly set the switch. */
4527 if (TARGET_ALWAYS_HINT
>= 0)
4528 rs6000_always_hint
= TARGET_ALWAYS_HINT
;
4530 if (TARGET_SCHED_GROUPS
>= 0)
4531 rs6000_sched_groups
= TARGET_SCHED_GROUPS
;
4533 if (TARGET_ALIGN_BRANCH_TARGETS
>= 0)
4534 rs6000_align_branch_targets
= TARGET_ALIGN_BRANCH_TARGETS
;
4536 rs6000_sched_restricted_insns_priority
4537 = (rs6000_sched_groups
? 1 : 0);
4539 /* Handle -msched-costly-dep option. */
4540 rs6000_sched_costly_dep
4541 = (rs6000_sched_groups
? true_store_to_load_dep_costly
: no_dep_costly
);
4543 if (rs6000_sched_costly_dep_str
)
4545 if (! strcmp (rs6000_sched_costly_dep_str
, "no"))
4546 rs6000_sched_costly_dep
= no_dep_costly
;
4547 else if (! strcmp (rs6000_sched_costly_dep_str
, "all"))
4548 rs6000_sched_costly_dep
= all_deps_costly
;
4549 else if (! strcmp (rs6000_sched_costly_dep_str
, "true_store_to_load"))
4550 rs6000_sched_costly_dep
= true_store_to_load_dep_costly
;
4551 else if (! strcmp (rs6000_sched_costly_dep_str
, "store_to_load"))
4552 rs6000_sched_costly_dep
= store_to_load_dep_costly
;
4554 rs6000_sched_costly_dep
= ((enum rs6000_dependence_cost
)
4555 atoi (rs6000_sched_costly_dep_str
));
4558 /* Handle -minsert-sched-nops option. */
4559 rs6000_sched_insert_nops
4560 = (rs6000_sched_groups
? sched_finish_regroup_exact
: sched_finish_none
);
4562 if (rs6000_sched_insert_nops_str
)
4564 if (! strcmp (rs6000_sched_insert_nops_str
, "no"))
4565 rs6000_sched_insert_nops
= sched_finish_none
;
4566 else if (! strcmp (rs6000_sched_insert_nops_str
, "pad"))
4567 rs6000_sched_insert_nops
= sched_finish_pad_groups
;
4568 else if (! strcmp (rs6000_sched_insert_nops_str
, "regroup_exact"))
4569 rs6000_sched_insert_nops
= sched_finish_regroup_exact
;
4571 rs6000_sched_insert_nops
= ((enum rs6000_nop_insertion
)
4572 atoi (rs6000_sched_insert_nops_str
));
4575 /* Handle stack protector */
4576 if (!global_options_set
.x_rs6000_stack_protector_guard
)
4577 #ifdef TARGET_THREAD_SSP_OFFSET
4578 rs6000_stack_protector_guard
= SSP_TLS
;
4580 rs6000_stack_protector_guard
= SSP_GLOBAL
;
4583 #ifdef TARGET_THREAD_SSP_OFFSET
4584 rs6000_stack_protector_guard_offset
= TARGET_THREAD_SSP_OFFSET
;
4585 rs6000_stack_protector_guard_reg
= TARGET_64BIT
? 13 : 2;
4588 if (global_options_set
.x_rs6000_stack_protector_guard_offset_str
)
4591 const char *str
= rs6000_stack_protector_guard_offset_str
;
4594 long offset
= strtol (str
, &endp
, 0);
4595 if (!*str
|| *endp
|| errno
)
4596 error ("%qs is not a valid number in %qs", str
,
4597 "-mstack-protector-guard-offset=");
4599 if (!IN_RANGE (offset
, -0x8000, 0x7fff)
4600 || (TARGET_64BIT
&& (offset
& 3)))
4601 error ("%qs is not a valid offset in %qs", str
,
4602 "-mstack-protector-guard-offset=");
4604 rs6000_stack_protector_guard_offset
= offset
;
4607 if (global_options_set
.x_rs6000_stack_protector_guard_reg_str
)
4609 const char *str
= rs6000_stack_protector_guard_reg_str
;
4610 int reg
= decode_reg_name (str
);
4612 if (!IN_RANGE (reg
, 1, 31))
4613 error ("%qs is not a valid base register in %qs", str
,
4614 "-mstack-protector-guard-reg=");
4616 rs6000_stack_protector_guard_reg
= reg
;
4619 if (rs6000_stack_protector_guard
== SSP_TLS
4620 && !IN_RANGE (rs6000_stack_protector_guard_reg
, 1, 31))
4621 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4625 #ifdef TARGET_REGNAMES
4626 /* If the user desires alternate register names, copy in the
4627 alternate names now. */
4628 if (TARGET_REGNAMES
)
4629 memcpy (rs6000_reg_names
, alt_reg_names
, sizeof (rs6000_reg_names
));
4632 /* Set aix_struct_return last, after the ABI is determined.
4633 If -maix-struct-return or -msvr4-struct-return was explicitly
4634 used, don't override with the ABI default. */
4635 if (!global_options_set
.x_aix_struct_return
)
4636 aix_struct_return
= (DEFAULT_ABI
!= ABI_V4
|| DRAFT_V4_STRUCT_RET
);
4639 /* IBM XL compiler defaults to unsigned bitfields. */
4640 if (TARGET_XL_COMPAT
)
4641 flag_signed_bitfields
= 0;
4644 if (TARGET_LONG_DOUBLE_128
&& !TARGET_IEEEQUAD
)
4645 REAL_MODE_FORMAT (TFmode
) = &ibm_extended_format
;
4647 ASM_GENERATE_INTERNAL_LABEL (toc_label_name
, "LCTOC", 1);
4649 /* We can only guarantee the availability of DI pseudo-ops when
4650 assembling for 64-bit targets. */
4653 targetm
.asm_out
.aligned_op
.di
= NULL
;
4654 targetm
.asm_out
.unaligned_op
.di
= NULL
;
4658 /* Set branch target alignment, if not optimizing for size. */
4661 /* Cell wants to be aligned 8byte for dual issue. Titan wants to be
4662 aligned 8byte to avoid misprediction by the branch predictor. */
4663 if (rs6000_tune
== PROCESSOR_TITAN
4664 || rs6000_tune
== PROCESSOR_CELL
)
4666 if (flag_align_functions
&& !str_align_functions
)
4667 str_align_functions
= "8";
4668 if (flag_align_jumps
&& !str_align_jumps
)
4669 str_align_jumps
= "8";
4670 if (flag_align_loops
&& !str_align_loops
)
4671 str_align_loops
= "8";
4673 if (rs6000_align_branch_targets
)
4675 if (flag_align_functions
&& !str_align_functions
)
4676 str_align_functions
= "16";
4677 if (flag_align_jumps
&& !str_align_jumps
)
4678 str_align_jumps
= "16";
4679 if (flag_align_loops
&& !str_align_loops
)
4681 can_override_loop_align
= 1;
4682 str_align_loops
= "16";
4686 if (flag_align_jumps
&& !str_align_jumps
)
4687 str_align_jumps
= "16";
4688 if (flag_align_loops
&& !str_align_loops
)
4689 str_align_loops
= "16";
4692 /* Arrange to save and restore machine status around nested functions. */
4693 init_machine_status
= rs6000_init_machine_status
;
4695 /* We should always be splitting complex arguments, but we can't break
4696 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4697 if (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
)
4698 targetm
.calls
.split_complex_arg
= NULL
;
4700 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4701 if (DEFAULT_ABI
== ABI_AIX
)
4702 targetm
.calls
.custom_function_descriptors
= 0;
4705 /* Initialize rs6000_cost with the appropriate target costs. */
4707 rs6000_cost
= TARGET_POWERPC64
? &size64_cost
: &size32_cost
;
4709 switch (rs6000_tune
)
4711 case PROCESSOR_RS64A
:
4712 rs6000_cost
= &rs64a_cost
;
4715 case PROCESSOR_MPCCORE
:
4716 rs6000_cost
= &mpccore_cost
;
4719 case PROCESSOR_PPC403
:
4720 rs6000_cost
= &ppc403_cost
;
4723 case PROCESSOR_PPC405
:
4724 rs6000_cost
= &ppc405_cost
;
4727 case PROCESSOR_PPC440
:
4728 rs6000_cost
= &ppc440_cost
;
4731 case PROCESSOR_PPC476
:
4732 rs6000_cost
= &ppc476_cost
;
4735 case PROCESSOR_PPC601
:
4736 rs6000_cost
= &ppc601_cost
;
4739 case PROCESSOR_PPC603
:
4740 rs6000_cost
= &ppc603_cost
;
4743 case PROCESSOR_PPC604
:
4744 rs6000_cost
= &ppc604_cost
;
4747 case PROCESSOR_PPC604e
:
4748 rs6000_cost
= &ppc604e_cost
;
4751 case PROCESSOR_PPC620
:
4752 rs6000_cost
= &ppc620_cost
;
4755 case PROCESSOR_PPC630
:
4756 rs6000_cost
= &ppc630_cost
;
4759 case PROCESSOR_CELL
:
4760 rs6000_cost
= &ppccell_cost
;
4763 case PROCESSOR_PPC750
:
4764 case PROCESSOR_PPC7400
:
4765 rs6000_cost
= &ppc750_cost
;
4768 case PROCESSOR_PPC7450
:
4769 rs6000_cost
= &ppc7450_cost
;
4772 case PROCESSOR_PPC8540
:
4773 case PROCESSOR_PPC8548
:
4774 rs6000_cost
= &ppc8540_cost
;
4777 case PROCESSOR_PPCE300C2
:
4778 case PROCESSOR_PPCE300C3
:
4779 rs6000_cost
= &ppce300c2c3_cost
;
4782 case PROCESSOR_PPCE500MC
:
4783 rs6000_cost
= &ppce500mc_cost
;
4786 case PROCESSOR_PPCE500MC64
:
4787 rs6000_cost
= &ppce500mc64_cost
;
4790 case PROCESSOR_PPCE5500
:
4791 rs6000_cost
= &ppce5500_cost
;
4794 case PROCESSOR_PPCE6500
:
4795 rs6000_cost
= &ppce6500_cost
;
4798 case PROCESSOR_TITAN
:
4799 rs6000_cost
= &titan_cost
;
4802 case PROCESSOR_POWER4
:
4803 case PROCESSOR_POWER5
:
4804 rs6000_cost
= &power4_cost
;
4807 case PROCESSOR_POWER6
:
4808 rs6000_cost
= &power6_cost
;
4811 case PROCESSOR_POWER7
:
4812 rs6000_cost
= &power7_cost
;
4815 case PROCESSOR_POWER8
:
4816 rs6000_cost
= &power8_cost
;
4819 case PROCESSOR_POWER9
:
4820 rs6000_cost
= &power9_cost
;
4823 case PROCESSOR_PPCA2
:
4824 rs6000_cost
= &ppca2_cost
;
4833 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES
,
4834 rs6000_cost
->simultaneous_prefetches
,
4835 global_options
.x_param_values
,
4836 global_options_set
.x_param_values
);
4837 maybe_set_param_value (PARAM_L1_CACHE_SIZE
, rs6000_cost
->l1_cache_size
,
4838 global_options
.x_param_values
,
4839 global_options_set
.x_param_values
);
4840 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE
,
4841 rs6000_cost
->cache_line_size
,
4842 global_options
.x_param_values
,
4843 global_options_set
.x_param_values
);
4844 maybe_set_param_value (PARAM_L2_CACHE_SIZE
, rs6000_cost
->l2_cache_size
,
4845 global_options
.x_param_values
,
4846 global_options_set
.x_param_values
);
4848 /* Increase loop peeling limits based on performance analysis. */
4849 maybe_set_param_value (PARAM_MAX_PEELED_INSNS
, 400,
4850 global_options
.x_param_values
,
4851 global_options_set
.x_param_values
);
4852 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS
, 400,
4853 global_options
.x_param_values
,
4854 global_options_set
.x_param_values
);
4856 /* Use the 'model' -fsched-pressure algorithm by default. */
4857 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM
,
4858 SCHED_PRESSURE_MODEL
,
4859 global_options
.x_param_values
,
4860 global_options_set
.x_param_values
);
4862 /* If using typedef char *va_list, signal that
4863 __builtin_va_start (&ap, 0) can be optimized to
4864 ap = __builtin_next_arg (0). */
4865 if (DEFAULT_ABI
!= ABI_V4
)
4866 targetm
.expand_builtin_va_start
= NULL
;
4869 /* If not explicitly specified via option, decide whether to generate indexed
4870 load/store instructions. A value of -1 indicates that the
4871 initial value of this variable has not been overwritten. During
4872 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4873 if (TARGET_AVOID_XFORM
== -1)
4874 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4875 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4876 need indexed accesses and the type used is the scalar type of the element
4877 being loaded or stored. */
4878 TARGET_AVOID_XFORM
= (rs6000_tune
== PROCESSOR_POWER6
&& TARGET_CMPB
4879 && !TARGET_ALTIVEC
);
4881 /* Set the -mrecip options. */
4882 if (rs6000_recip_name
)
4884 char *p
= ASTRDUP (rs6000_recip_name
);
4886 unsigned int mask
, i
;
4889 while ((q
= strtok (p
, ",")) != NULL
)
4900 if (!strcmp (q
, "default"))
4901 mask
= ((TARGET_RECIP_PRECISION
)
4902 ? RECIP_HIGH_PRECISION
: RECIP_LOW_PRECISION
);
4905 for (i
= 0; i
< ARRAY_SIZE (recip_options
); i
++)
4906 if (!strcmp (q
, recip_options
[i
].string
))
4908 mask
= recip_options
[i
].mask
;
4912 if (i
== ARRAY_SIZE (recip_options
))
4914 error ("unknown option for %<%s=%s%>", "-mrecip", q
);
4922 rs6000_recip_control
&= ~mask
;
4924 rs6000_recip_control
|= mask
;
4928 /* Set the builtin mask of the various options used that could affect which
4929 builtins were used. In the past we used target_flags, but we've run out
4930 of bits, and some options are no longer in target_flags. */
4931 rs6000_builtin_mask
= rs6000_builtin_mask_calculate ();
4932 if (TARGET_DEBUG_BUILTIN
|| TARGET_DEBUG_TARGET
)
4933 rs6000_print_builtin_options (stderr
, 0, "builtin mask",
4934 rs6000_builtin_mask
);
4936 /* Initialize all of the registers. */
4937 rs6000_init_hard_regno_mode_ok (global_init_p
);
4939 /* Save the initial options in case the user does function specific options */
4941 target_option_default_node
= target_option_current_node
4942 = build_target_option_node (&global_options
);
4944 /* If not explicitly specified via option, decide whether to generate the
4945 extra blr's required to preserve the link stack on some cpus (eg, 476). */
4946 if (TARGET_LINK_STACK
== -1)
4947 SET_TARGET_LINK_STACK (rs6000_tune
== PROCESSOR_PPC476
&& flag_pic
);
4949 /* Deprecate use of -mno-speculate-indirect-jumps. */
4950 if (!rs6000_speculate_indirect_jumps
)
4951 warning (0, "%qs is deprecated and not recommended in any circumstances",
4952 "-mno-speculate-indirect-jumps");
4957 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4958 define the target cpu type. */
4961 rs6000_option_override (void)
4963 (void) rs6000_option_override_internal (true);
4967 /* Implement targetm.vectorize.builtin_mask_for_load. */
4969 rs6000_builtin_mask_for_load (void)
4971 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4972 if ((TARGET_ALTIVEC
&& !TARGET_VSX
)
4973 || (TARGET_VSX
&& !TARGET_EFFICIENT_UNALIGNED_VSX
))
4974 return altivec_builtin_mask_for_load
;
4979 /* Implement LOOP_ALIGN. */
4981 rs6000_loop_align (rtx label
)
4986 /* Don't override loop alignment if -falign-loops was specified. */
4987 if (!can_override_loop_align
)
4990 bb
= BLOCK_FOR_INSN (label
);
4991 ninsns
= num_loop_insns(bb
->loop_father
);
4993 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
4994 if (ninsns
> 4 && ninsns
<= 8
4995 && (rs6000_tune
== PROCESSOR_POWER4
4996 || rs6000_tune
== PROCESSOR_POWER5
4997 || rs6000_tune
== PROCESSOR_POWER6
4998 || rs6000_tune
== PROCESSOR_POWER7
4999 || rs6000_tune
== PROCESSOR_POWER8
))
5000 return align_flags (5);
5005 /* Return true iff, data reference of TYPE can reach vector alignment (16)
5006 after applying N number of iterations. This routine does not determine
5007 how may iterations are required to reach desired alignment. */
5010 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED
, bool is_packed
)
5017 if (rs6000_alignment_flags
== MASK_ALIGN_NATURAL
)
5020 if (rs6000_alignment_flags
== MASK_ALIGN_POWER
)
5030 /* Assuming that all other types are naturally aligned. CHECKME! */
5035 /* Return true if the vector misalignment factor is supported by the
5038 rs6000_builtin_support_vector_misalignment (machine_mode mode
,
5045 if (TARGET_EFFICIENT_UNALIGNED_VSX
)
5048 /* Return if movmisalign pattern is not supported for this mode. */
5049 if (optab_handler (movmisalign_optab
, mode
) == CODE_FOR_nothing
)
5052 if (misalignment
== -1)
5054 /* Misalignment factor is unknown at compile time but we know
5055 it's word aligned. */
5056 if (rs6000_vector_alignment_reachable (type
, is_packed
))
5058 int element_size
= TREE_INT_CST_LOW (TYPE_SIZE (type
));
5060 if (element_size
== 64 || element_size
== 32)
5067 /* VSX supports word-aligned vector. */
5068 if (misalignment
% 4 == 0)
5074 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5076 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost
,
5077 tree vectype
, int misalign
)
5082 switch (type_of_cost
)
5092 case cond_branch_not_taken
:
5101 case vec_promote_demote
:
5107 case cond_branch_taken
:
5110 case unaligned_load
:
5111 case vector_gather_load
:
5112 if (TARGET_EFFICIENT_UNALIGNED_VSX
)
5115 if (TARGET_VSX
&& TARGET_ALLOW_MOVMISALIGN
)
5117 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
5119 /* Double word aligned. */
5127 /* Double word aligned. */
5131 /* Unknown misalignment. */
5144 /* Misaligned loads are not supported. */
5149 case unaligned_store
:
5150 case vector_scatter_store
:
5151 if (TARGET_EFFICIENT_UNALIGNED_VSX
)
5154 if (TARGET_VSX
&& TARGET_ALLOW_MOVMISALIGN
)
5156 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
5158 /* Double word aligned. */
5166 /* Double word aligned. */
5170 /* Unknown misalignment. */
5183 /* Misaligned stores are not supported. */
5189 /* This is a rough approximation assuming non-constant elements
5190 constructed into a vector via element insertion. FIXME:
5191 vec_construct is not granular enough for uniformly good
5192 decisions. If the initialization is a splat, this is
5193 cheaper than we estimate. Improve this someday. */
5194 elem_type
= TREE_TYPE (vectype
);
5195 /* 32-bit vectors loaded into registers are stored as double
5196 precision, so we need 2 permutes, 2 converts, and 1 merge
5197 to construct a vector of short floats from them. */
5198 if (SCALAR_FLOAT_TYPE_P (elem_type
)
5199 && TYPE_PRECISION (elem_type
) == 32)
5201 /* On POWER9, integer vector types are built up in GPRs and then
5202 use a direct move (2 cycles). For POWER8 this is even worse,
5203 as we need two direct moves and a merge, and the direct moves
5205 else if (INTEGRAL_TYPE_P (elem_type
))
5207 if (TARGET_P9_VECTOR
)
5208 return TYPE_VECTOR_SUBPARTS (vectype
) - 1 + 2;
5210 return TYPE_VECTOR_SUBPARTS (vectype
) - 1 + 5;
5213 /* V2DFmode doesn't need a direct move. */
5221 /* Implement targetm.vectorize.preferred_simd_mode. */
5224 rs6000_preferred_simd_mode (scalar_mode mode
)
5233 if (TARGET_ALTIVEC
|| TARGET_VSX
)
5253 typedef struct _rs6000_cost_data
5255 struct loop
*loop_info
;
5259 /* Test for likely overcommitment of vector hardware resources. If a
5260 loop iteration is relatively large, and too large a percentage of
5261 instructions in the loop are vectorized, the cost model may not
5262 adequately reflect delays from unavailable vector resources.
5263 Penalize the loop body cost for this case. */
5266 rs6000_density_test (rs6000_cost_data
*data
)
5268 const int DENSITY_PCT_THRESHOLD
= 85;
5269 const int DENSITY_SIZE_THRESHOLD
= 70;
5270 const int DENSITY_PENALTY
= 10;
5271 struct loop
*loop
= data
->loop_info
;
5272 basic_block
*bbs
= get_loop_body (loop
);
5273 int nbbs
= loop
->num_nodes
;
5274 loop_vec_info loop_vinfo
= loop_vec_info_for_loop (data
->loop_info
);
5275 int vec_cost
= data
->cost
[vect_body
], not_vec_cost
= 0;
5278 for (i
= 0; i
< nbbs
; i
++)
5280 basic_block bb
= bbs
[i
];
5281 gimple_stmt_iterator gsi
;
5283 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
5285 gimple
*stmt
= gsi_stmt (gsi
);
5286 stmt_vec_info stmt_info
= loop_vinfo
->lookup_stmt (stmt
);
5288 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
5289 && !STMT_VINFO_IN_PATTERN_P (stmt_info
))
5295 density_pct
= (vec_cost
* 100) / (vec_cost
+ not_vec_cost
);
5297 if (density_pct
> DENSITY_PCT_THRESHOLD
5298 && vec_cost
+ not_vec_cost
> DENSITY_SIZE_THRESHOLD
)
5300 data
->cost
[vect_body
] = vec_cost
* (100 + DENSITY_PENALTY
) / 100;
5301 if (dump_enabled_p ())
5302 dump_printf_loc (MSG_NOTE
, vect_location
,
5303 "density %d%%, cost %d exceeds threshold, penalizing "
5304 "loop body cost by %d%%", density_pct
,
5305 vec_cost
+ not_vec_cost
, DENSITY_PENALTY
);
5309 /* Implement targetm.vectorize.init_cost. */
5311 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5312 instruction is needed by the vectorization. */
5313 static bool rs6000_vect_nonmem
;
5316 rs6000_init_cost (struct loop
*loop_info
)
5318 rs6000_cost_data
*data
= XNEW (struct _rs6000_cost_data
);
5319 data
->loop_info
= loop_info
;
5320 data
->cost
[vect_prologue
] = 0;
5321 data
->cost
[vect_body
] = 0;
5322 data
->cost
[vect_epilogue
] = 0;
5323 rs6000_vect_nonmem
= false;
5327 /* Implement targetm.vectorize.add_stmt_cost. */
5330 rs6000_add_stmt_cost (void *data
, int count
, enum vect_cost_for_stmt kind
,
5331 struct _stmt_vec_info
*stmt_info
, int misalign
,
5332 enum vect_cost_model_location where
)
5334 rs6000_cost_data
*cost_data
= (rs6000_cost_data
*) data
;
5335 unsigned retval
= 0;
5337 if (flag_vect_cost_model
)
5339 tree vectype
= stmt_info
? stmt_vectype (stmt_info
) : NULL_TREE
;
5340 int stmt_cost
= rs6000_builtin_vectorization_cost (kind
, vectype
,
5342 /* Statements in an inner loop relative to the loop being
5343 vectorized are weighted more heavily. The value here is
5344 arbitrary and could potentially be improved with analysis. */
5345 if (where
== vect_body
&& stmt_info
&& stmt_in_inner_loop_p (stmt_info
))
5346 count
*= 50; /* FIXME. */
5348 retval
= (unsigned) (count
* stmt_cost
);
5349 cost_data
->cost
[where
] += retval
;
5351 /* Check whether we're doing something other than just a copy loop.
5352 Not all such loops may be profitably vectorized; see
5353 rs6000_finish_cost. */
5354 if ((kind
== vec_to_scalar
|| kind
== vec_perm
5355 || kind
== vec_promote_demote
|| kind
== vec_construct
5356 || kind
== scalar_to_vec
)
5357 || (where
== vect_body
&& kind
== vector_stmt
))
5358 rs6000_vect_nonmem
= true;
5364 /* Implement targetm.vectorize.finish_cost. */
5367 rs6000_finish_cost (void *data
, unsigned *prologue_cost
,
5368 unsigned *body_cost
, unsigned *epilogue_cost
)
5370 rs6000_cost_data
*cost_data
= (rs6000_cost_data
*) data
;
5372 if (cost_data
->loop_info
)
5373 rs6000_density_test (cost_data
);
5375 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5376 that require versioning for any reason. The vectorization is at
5377 best a wash inside the loop, and the versioning checks make
5378 profitability highly unlikely and potentially quite harmful. */
5379 if (cost_data
->loop_info
)
5381 loop_vec_info vec_info
= loop_vec_info_for_loop (cost_data
->loop_info
);
5382 if (!rs6000_vect_nonmem
5383 && LOOP_VINFO_VECT_FACTOR (vec_info
) == 2
5384 && LOOP_REQUIRES_VERSIONING (vec_info
))
5385 cost_data
->cost
[vect_body
] += 10000;
5388 *prologue_cost
= cost_data
->cost
[vect_prologue
];
5389 *body_cost
= cost_data
->cost
[vect_body
];
5390 *epilogue_cost
= cost_data
->cost
[vect_epilogue
];
5393 /* Implement targetm.vectorize.destroy_cost_data. */
5396 rs6000_destroy_cost_data (void *data
)
5401 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5402 library with vectorized intrinsics. */
5405 rs6000_builtin_vectorized_libmass (combined_fn fn
, tree type_out
,
5409 const char *suffix
= NULL
;
5410 tree fntype
, new_fndecl
, bdecl
= NULL_TREE
;
5413 machine_mode el_mode
, in_mode
;
5416 /* Libmass is suitable for unsafe math only as it does not correctly support
5417 parts of IEEE with the required precision such as denormals. Only support
5418 it if we have VSX to use the simd d2 or f4 functions.
5419 XXX: Add variable length support. */
5420 if (!flag_unsafe_math_optimizations
|| !TARGET_VSX
)
5423 el_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5424 n
= TYPE_VECTOR_SUBPARTS (type_out
);
5425 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5426 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5427 if (el_mode
!= in_mode
5463 if (el_mode
== DFmode
&& n
== 2)
5465 bdecl
= mathfn_built_in (double_type_node
, fn
);
5466 suffix
= "d2"; /* pow -> powd2 */
5468 else if (el_mode
== SFmode
&& n
== 4)
5470 bdecl
= mathfn_built_in (float_type_node
, fn
);
5471 suffix
= "4"; /* powf -> powf4 */
5483 gcc_assert (suffix
!= NULL
);
5484 bname
= IDENTIFIER_POINTER (DECL_NAME (bdecl
));
5488 strcpy (name
, bname
+ sizeof ("__builtin_") - 1);
5489 strcat (name
, suffix
);
5492 fntype
= build_function_type_list (type_out
, type_in
, NULL
);
5493 else if (n_args
== 2)
5494 fntype
= build_function_type_list (type_out
, type_in
, type_in
, NULL
);
5498 /* Build a function declaration for the vectorized function. */
5499 new_fndecl
= build_decl (BUILTINS_LOCATION
,
5500 FUNCTION_DECL
, get_identifier (name
), fntype
);
5501 TREE_PUBLIC (new_fndecl
) = 1;
5502 DECL_EXTERNAL (new_fndecl
) = 1;
5503 DECL_IS_NOVOPS (new_fndecl
) = 1;
5504 TREE_READONLY (new_fndecl
) = 1;
5509 /* Returns a function decl for a vectorized version of the builtin function
5510 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5511 if it is not available. */
5514 rs6000_builtin_vectorized_function (unsigned int fn
, tree type_out
,
5517 machine_mode in_mode
, out_mode
;
5520 if (TARGET_DEBUG_BUILTIN
)
5521 fprintf (stderr
, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5522 combined_fn_name (combined_fn (fn
)),
5523 GET_MODE_NAME (TYPE_MODE (type_out
)),
5524 GET_MODE_NAME (TYPE_MODE (type_in
)));
5526 if (TREE_CODE (type_out
) != VECTOR_TYPE
5527 || TREE_CODE (type_in
) != VECTOR_TYPE
)
5530 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5531 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
5532 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5533 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5538 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5539 && out_mode
== DFmode
&& out_n
== 2
5540 && in_mode
== DFmode
&& in_n
== 2)
5541 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNDP
];
5542 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5543 && out_mode
== SFmode
&& out_n
== 4
5544 && in_mode
== SFmode
&& in_n
== 4)
5545 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNSP
];
5546 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5547 && out_mode
== SFmode
&& out_n
== 4
5548 && in_mode
== SFmode
&& in_n
== 4)
5549 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_COPYSIGN_V4SF
];
5552 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5553 && out_mode
== DFmode
&& out_n
== 2
5554 && in_mode
== DFmode
&& in_n
== 2)
5555 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIP
];
5556 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5557 && out_mode
== SFmode
&& out_n
== 4
5558 && in_mode
== SFmode
&& in_n
== 4)
5559 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIP
];
5560 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5561 && out_mode
== SFmode
&& out_n
== 4
5562 && in_mode
== SFmode
&& in_n
== 4)
5563 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIP
];
5566 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5567 && out_mode
== DFmode
&& out_n
== 2
5568 && in_mode
== DFmode
&& in_n
== 2)
5569 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIM
];
5570 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5571 && out_mode
== SFmode
&& out_n
== 4
5572 && in_mode
== SFmode
&& in_n
== 4)
5573 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIM
];
5574 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5575 && out_mode
== SFmode
&& out_n
== 4
5576 && in_mode
== SFmode
&& in_n
== 4)
5577 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIM
];
5580 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5581 && out_mode
== DFmode
&& out_n
== 2
5582 && in_mode
== DFmode
&& in_n
== 2)
5583 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDDP
];
5584 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5585 && out_mode
== SFmode
&& out_n
== 4
5586 && in_mode
== SFmode
&& in_n
== 4)
5587 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDSP
];
5588 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5589 && out_mode
== SFmode
&& out_n
== 4
5590 && in_mode
== SFmode
&& in_n
== 4)
5591 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VMADDFP
];
5594 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5595 && out_mode
== DFmode
&& out_n
== 2
5596 && in_mode
== DFmode
&& in_n
== 2)
5597 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIZ
];
5598 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5599 && out_mode
== SFmode
&& out_n
== 4
5600 && in_mode
== SFmode
&& in_n
== 4)
5601 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIZ
];
5602 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5603 && out_mode
== SFmode
&& out_n
== 4
5604 && in_mode
== SFmode
&& in_n
== 4)
5605 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIZ
];
5608 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5609 && flag_unsafe_math_optimizations
5610 && out_mode
== DFmode
&& out_n
== 2
5611 && in_mode
== DFmode
&& in_n
== 2)
5612 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPI
];
5613 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5614 && flag_unsafe_math_optimizations
5615 && out_mode
== SFmode
&& out_n
== 4
5616 && in_mode
== SFmode
&& in_n
== 4)
5617 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPI
];
5620 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5621 && !flag_trapping_math
5622 && out_mode
== DFmode
&& out_n
== 2
5623 && in_mode
== DFmode
&& in_n
== 2)
5624 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIC
];
5625 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5626 && !flag_trapping_math
5627 && out_mode
== SFmode
&& out_n
== 4
5628 && in_mode
== SFmode
&& in_n
== 4)
5629 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIC
];
5635 /* Generate calls to libmass if appropriate. */
5636 if (rs6000_veclib_handler
)
5637 return rs6000_veclib_handler (combined_fn (fn
), type_out
, type_in
);
5642 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5645 rs6000_builtin_md_vectorized_function (tree fndecl
, tree type_out
,
5648 machine_mode in_mode
, out_mode
;
5651 if (TARGET_DEBUG_BUILTIN
)
5652 fprintf (stderr
, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5653 IDENTIFIER_POINTER (DECL_NAME (fndecl
)),
5654 GET_MODE_NAME (TYPE_MODE (type_out
)),
5655 GET_MODE_NAME (TYPE_MODE (type_in
)));
5657 if (TREE_CODE (type_out
) != VECTOR_TYPE
5658 || TREE_CODE (type_in
) != VECTOR_TYPE
)
5661 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5662 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
5663 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5664 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5666 enum rs6000_builtins fn
5667 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
5670 case RS6000_BUILTIN_RSQRTF
:
5671 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
5672 && out_mode
== SFmode
&& out_n
== 4
5673 && in_mode
== SFmode
&& in_n
== 4)
5674 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRSQRTFP
];
5676 case RS6000_BUILTIN_RSQRT
:
5677 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5678 && out_mode
== DFmode
&& out_n
== 2
5679 && in_mode
== DFmode
&& in_n
== 2)
5680 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
5682 case RS6000_BUILTIN_RECIPF
:
5683 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
5684 && out_mode
== SFmode
&& out_n
== 4
5685 && in_mode
== SFmode
&& in_n
== 4)
5686 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRECIPFP
];
5688 case RS6000_BUILTIN_RECIP
:
5689 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5690 && out_mode
== DFmode
&& out_n
== 2
5691 && in_mode
== DFmode
&& in_n
== 2)
5692 return rs6000_builtin_decls
[VSX_BUILTIN_RECIP_V2DF
];
5700 /* Default CPU string for rs6000*_file_start functions. */
5701 static const char *rs6000_default_cpu
;
5703 /* Do anything needed at the start of the asm file. */
5706 rs6000_file_start (void)
5709 const char *start
= buffer
;
5710 FILE *file
= asm_out_file
;
5712 rs6000_default_cpu
= TARGET_CPU_DEFAULT
;
5714 default_file_start ();
5716 if (flag_verbose_asm
)
5718 sprintf (buffer
, "\n%s rs6000/powerpc options:", ASM_COMMENT_START
);
5720 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
5722 fprintf (file
, "%s --with-cpu=%s", start
, rs6000_default_cpu
);
5726 if (global_options_set
.x_rs6000_cpu_index
)
5728 fprintf (file
, "%s -mcpu=%s", start
,
5729 processor_target_table
[rs6000_cpu_index
].name
);
5733 if (global_options_set
.x_rs6000_tune_index
)
5735 fprintf (file
, "%s -mtune=%s", start
,
5736 processor_target_table
[rs6000_tune_index
].name
);
5740 if (PPC405_ERRATUM77
)
5742 fprintf (file
, "%s PPC405CR_ERRATUM77", start
);
5746 #ifdef USING_ELFOS_H
5747 switch (rs6000_sdata
)
5749 case SDATA_NONE
: fprintf (file
, "%s -msdata=none", start
); start
= ""; break;
5750 case SDATA_DATA
: fprintf (file
, "%s -msdata=data", start
); start
= ""; break;
5751 case SDATA_SYSV
: fprintf (file
, "%s -msdata=sysv", start
); start
= ""; break;
5752 case SDATA_EABI
: fprintf (file
, "%s -msdata=eabi", start
); start
= ""; break;
5755 if (rs6000_sdata
&& g_switch_value
)
5757 fprintf (file
, "%s -G %d", start
,
5767 #ifdef USING_ELFOS_H
5768 if (!(rs6000_default_cpu
&& rs6000_default_cpu
[0])
5769 && !global_options_set
.x_rs6000_cpu_index
)
5771 fputs ("\t.machine ", asm_out_file
);
5772 if ((rs6000_isa_flags
& OPTION_MASK_MODULO
) != 0)
5773 fputs ("power9\n", asm_out_file
);
5774 else if ((rs6000_isa_flags
& OPTION_MASK_DIRECT_MOVE
) != 0)
5775 fputs ("power8\n", asm_out_file
);
5776 else if ((rs6000_isa_flags
& OPTION_MASK_POPCNTD
) != 0)
5777 fputs ("power7\n", asm_out_file
);
5778 else if ((rs6000_isa_flags
& OPTION_MASK_CMPB
) != 0)
5779 fputs ("power6\n", asm_out_file
);
5780 else if ((rs6000_isa_flags
& OPTION_MASK_POPCNTB
) != 0)
5781 fputs ("power5\n", asm_out_file
);
5782 else if ((rs6000_isa_flags
& OPTION_MASK_MFCRF
) != 0)
5783 fputs ("power4\n", asm_out_file
);
5784 else if ((rs6000_isa_flags
& OPTION_MASK_POWERPC64
) != 0)
5785 fputs ("ppc64\n", asm_out_file
);
5787 fputs ("ppc\n", asm_out_file
);
5791 if (DEFAULT_ABI
== ABI_ELFv2
)
5792 fprintf (file
, "\t.abiversion 2\n");
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_size == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
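/* Worked examples (assuming a 64-bit HOST_WIDE_INT): -5 fits the addi test
   and needs 1 instruction; 0x12340000 has a zero low halfword and a
   sign-extendable top, so addis alone suffices (1 instruction);
   0x12345678 needs addis + addi/ori (2 instructions); and the 64-bit
   constant 0x123456789abcdef0 splits into a high word and a low word of
   2 instructions each plus one rotate-and-insert, for 5 in total.  */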
5857 num_insns_constant (rtx op
, machine_mode mode
)
5859 HOST_WIDE_INT low
, high
;
5861 switch (GET_CODE (op
))
5864 if ((INTVAL (op
) >> 31) != 0 && (INTVAL (op
) >> 31) != -1
5865 && rs6000_is_valid_and_mask (op
, mode
))
5868 return num_insns_constant_wide (INTVAL (op
));
5870 case CONST_WIDE_INT
:
5873 int ins
= CONST_WIDE_INT_NUNITS (op
) - 1;
5874 for (i
= 0; i
< CONST_WIDE_INT_NUNITS (op
); i
++)
5875 ins
+= num_insns_constant_wide (CONST_WIDE_INT_ELT (op
, i
));
5880 if (mode
== SFmode
|| mode
== SDmode
)
5884 if (DECIMAL_FLOAT_MODE_P (mode
))
5885 REAL_VALUE_TO_TARGET_DECIMAL32
5886 (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5888 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5889 return num_insns_constant_wide ((HOST_WIDE_INT
) l
);
5893 if (DECIMAL_FLOAT_MODE_P (mode
))
5894 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5896 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5897 high
= l
[WORDS_BIG_ENDIAN
== 0];
5898 low
= l
[WORDS_BIG_ENDIAN
!= 0];
5901 return (num_insns_constant_wide (low
)
5902 + num_insns_constant_wide (high
));
5905 if ((high
== 0 && low
>= 0)
5906 || (high
== -1 && low
< 0))
5907 return num_insns_constant_wide (low
);
5909 else if (rs6000_is_valid_and_mask (op
, mode
))
5913 return num_insns_constant_wide (high
) + 1;
5916 return (num_insns_constant_wide (high
)
5917 + num_insns_constant_wide (low
) + 1);
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode, the
   corresponding "float" is interpreted as an SImode integer.  */

HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
5945 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5946 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5947 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5948 all items are set to the same value and contain COPIES replicas of the
5949 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5950 operand and the others are set to the value of the operand's msb. */
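/* Illustrative cases (my reading of the STEP/COPIES convention): the
   V4SImode vector { 0x00050005, 0x00050005, 0x00050005, 0x00050005 } is
   vspltish 5 seen with COPIES == 2, since each 32-bit element holds two
   copies of the 16-bit splat value; the V16QImode vector whose big-endian
   bytes repeat the pattern 0, 0, 0, 5 is vspltisw 5 seen with STEP == 4,
   since one byte in four holds the operand and the rest hold its msb (0).  */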
5953 vspltis_constant (rtx op
, unsigned step
, unsigned copies
)
5955 machine_mode mode
= GET_MODE (op
);
5956 machine_mode inner
= GET_MODE_INNER (mode
);
5964 HOST_WIDE_INT splat_val
;
5965 HOST_WIDE_INT msb_val
;
5967 if (mode
== V2DImode
|| mode
== V2DFmode
|| mode
== V1TImode
)
5970 nunits
= GET_MODE_NUNITS (mode
);
5971 bitsize
= GET_MODE_BITSIZE (inner
);
5972 mask
= GET_MODE_MASK (inner
);
5974 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
5976 msb_val
= val
>= 0 ? 0 : -1;
5978 /* Construct the value to be splatted, if possible. If not, return 0. */
5979 for (i
= 2; i
<= copies
; i
*= 2)
5981 HOST_WIDE_INT small_val
;
5983 small_val
= splat_val
>> bitsize
;
5985 if (splat_val
!= ((HOST_WIDE_INT
)
5986 ((unsigned HOST_WIDE_INT
) small_val
<< bitsize
)
5987 | (small_val
& mask
)))
5989 splat_val
= small_val
;
5992 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5993 if (EASY_VECTOR_15 (splat_val
))
/* Also check if we can splat, and then add the result to itself.  Do so if
   the value is positive, or if the splat instruction is using OP's mode;
   for splat_val < 0, the splat and the add should use the same mode.  */
5999 else if (EASY_VECTOR_15_ADD_SELF (splat_val
)
6000 && (splat_val
>= 0 || (step
== 1 && copies
== 1)))
/* Also check if we are loading up the most significant bit which can be done
   by loading up -1 and shifting the value left by -1.  */
6005 else if (EASY_VECTOR_MSB (splat_val
, inner
))
6011 /* Check if VAL is present in every STEP-th element, and the
6012 other elements are filled with its most significant bit. */
6013 for (i
= 1; i
< nunits
; ++i
)
6015 HOST_WIDE_INT desired_val
;
6016 unsigned elt
= BYTES_BIG_ENDIAN
? nunits
- 1 - i
: i
;
6017 if ((i
& (step
- 1)) == 0)
6020 desired_val
= msb_val
;
6022 if (desired_val
!= const_vector_elt_as_int (op
, elt
))
/* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
   instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */
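/* Example of the return value (illustrative): the big-endian V4SImode
   constant { 5, 5, 5, 0 } can be formed as vspltisw 5 followed by a VSLDOI
   that shifts in one zero element, so the function returns 4 (one element
   of 4 zero bytes); the mirror case ending in an all-ones element would
   return -4.  */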
6039 vspltis_shifted (rtx op
)
6041 machine_mode mode
= GET_MODE (op
);
6042 machine_mode inner
= GET_MODE_INNER (mode
);
6050 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
)
6053 /* We need to create pseudo registers to do the shift, so don't recognize
6054 shift vector constants after reload. */
6055 if (!can_create_pseudo_p ())
6058 nunits
= GET_MODE_NUNITS (mode
);
6059 mask
= GET_MODE_MASK (inner
);
6061 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? 0 : nunits
- 1);
6063 /* Check if the value can really be the operand of a vspltis[bhw]. */
6064 if (EASY_VECTOR_15 (val
))
6067 /* Also check if we are loading up the most significant bit which can be done
6068 by loading up -1 and shifting the value left by -1. */
6069 else if (EASY_VECTOR_MSB (val
, inner
))
6075 /* Check if VAL is present in every STEP-th element until we find elements
6076 that are 0 or all 1 bits. */
6077 for (i
= 1; i
< nunits
; ++i
)
6079 unsigned elt
= BYTES_BIG_ENDIAN
? i
: nunits
- 1 - i
;
6080 HOST_WIDE_INT elt_val
= const_vector_elt_as_int (op
, elt
);
6082 /* If the value isn't the splat value, check for the remaining elements
6088 for (j
= i
+1; j
< nunits
; ++j
)
6090 unsigned elt2
= BYTES_BIG_ENDIAN
? j
: nunits
- 1 - j
;
6091 if (const_vector_elt_as_int (op
, elt2
) != 0)
6095 return (nunits
- i
) * GET_MODE_SIZE (inner
);
6098 else if ((elt_val
& mask
) == mask
)
6100 for (j
= i
+1; j
< nunits
; ++j
)
6102 unsigned elt2
= BYTES_BIG_ENDIAN
? j
: nunits
- 1 - j
;
6103 if ((const_vector_elt_as_int (op
, elt2
) & mask
) != mask
)
6107 return -((nunits
- i
) * GET_MODE_SIZE (inner
));
/* If all elements are equal, we don't need to do VSLDOI.  */
6120 /* Return true if OP is of the given MODE and can be synthesized
6121 with a vspltisb, vspltish or vspltisw. */
6124 easy_altivec_constant (rtx op
, machine_mode mode
)
6126 unsigned step
, copies
;
6128 if (mode
== VOIDmode
)
6129 mode
= GET_MODE (op
);
6130 else if (mode
!= GET_MODE (op
))
6133 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6135 if (mode
== V2DFmode
)
6136 return zero_constant (op
, mode
);
6138 else if (mode
== V2DImode
)
6140 if (GET_CODE (CONST_VECTOR_ELT (op
, 0)) != CONST_INT
6141 || GET_CODE (CONST_VECTOR_ELT (op
, 1)) != CONST_INT
)
6144 if (zero_constant (op
, mode
))
6147 if (INTVAL (CONST_VECTOR_ELT (op
, 0)) == -1
6148 && INTVAL (CONST_VECTOR_ELT (op
, 1)) == -1)
6154 /* V1TImode is a special container for TImode. Ignore for now. */
6155 else if (mode
== V1TImode
)
6158 /* Start with a vspltisw. */
6159 step
= GET_MODE_NUNITS (mode
) / 4;
6162 if (vspltis_constant (op
, step
, copies
))
6165 /* Then try with a vspltish. */
6171 if (vspltis_constant (op
, step
, copies
))
6174 /* And finally a vspltisb. */
6180 if (vspltis_constant (op
, step
, copies
))
6183 if (vspltis_shifted (op
) != 0)
6189 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6190 result is OP. Abort if it is not possible. */
6193 gen_easy_altivec_constant (rtx op
)
6195 machine_mode mode
= GET_MODE (op
);
6196 int nunits
= GET_MODE_NUNITS (mode
);
6197 rtx val
= CONST_VECTOR_ELT (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
6198 unsigned step
= nunits
/ 4;
6199 unsigned copies
= 1;
6201 /* Start with a vspltisw. */
6202 if (vspltis_constant (op
, step
, copies
))
6203 return gen_rtx_VEC_DUPLICATE (V4SImode
, gen_lowpart (SImode
, val
));
6205 /* Then try with a vspltish. */
6211 if (vspltis_constant (op
, step
, copies
))
6212 return gen_rtx_VEC_DUPLICATE (V8HImode
, gen_lowpart (HImode
, val
));
6214 /* And finally a vspltisb. */
6220 if (vspltis_constant (op
, step
, copies
))
6221 return gen_rtx_VEC_DUPLICATE (V16QImode
, gen_lowpart (QImode
, val
));
/* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
   instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).

   Return the number of instructions needed (1 or 2) into the address pointed
   to by NUM_INSNS_PTR.

   Return the constant that is being split via CONSTANT_PTR.  */
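/* Illustrative outcomes (not exhaustive): an all-42s V16QImode constant
   needs just xxspltib, so *num_insns_ptr is 1; an all-42s V8HImode or
   V4SImode constant needs xxspltib followed by a sign-extending unpack
   (vupkhsb or vextsb2w), so *num_insns_ptr is 2.  In both cases
   *constant_ptr is 42.  Values that already fit vspltis[hw] (-16..15)
   are left to those instructions instead.  */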
6235 xxspltib_constant_p (rtx op
,
6240 size_t nunits
= GET_MODE_NUNITS (mode
);
6242 HOST_WIDE_INT value
;
6245 /* Set the returned values to out of bound values. */
6246 *num_insns_ptr
= -1;
6247 *constant_ptr
= 256;
6249 if (!TARGET_P9_VECTOR
)
6252 if (mode
== VOIDmode
)
6253 mode
= GET_MODE (op
);
6255 else if (mode
!= GET_MODE (op
) && GET_MODE (op
) != VOIDmode
)
6258 /* Handle (vec_duplicate <constant>). */
6259 if (GET_CODE (op
) == VEC_DUPLICATE
)
6261 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
6262 && mode
!= V2DImode
)
6265 element
= XEXP (op
, 0);
6266 if (!CONST_INT_P (element
))
6269 value
= INTVAL (element
);
6270 if (!IN_RANGE (value
, -128, 127))
6274 /* Handle (const_vector [...]). */
6275 else if (GET_CODE (op
) == CONST_VECTOR
)
6277 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
6278 && mode
!= V2DImode
)
6281 element
= CONST_VECTOR_ELT (op
, 0);
6282 if (!CONST_INT_P (element
))
6285 value
= INTVAL (element
);
6286 if (!IN_RANGE (value
, -128, 127))
6289 for (i
= 1; i
< nunits
; i
++)
6291 element
= CONST_VECTOR_ELT (op
, i
);
6292 if (!CONST_INT_P (element
))
6295 if (value
!= INTVAL (element
))
6300 /* Handle integer constants being loaded into the upper part of the VSX
6301 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6302 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLITIB. */
6303 else if (CONST_INT_P (op
))
6305 if (!SCALAR_INT_MODE_P (mode
))
6308 value
= INTVAL (op
);
6309 if (!IN_RANGE (value
, -128, 127))
6312 if (!IN_RANGE (value
, -1, 0))
6314 if (!(reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
] & RELOAD_REG_VALID
))
6317 if (EASY_VECTOR_15 (value
))
6325 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6326 sign extend. Special case 0/-1 to allow getting any VSX register instead
6327 of an Altivec register. */
6328 if ((mode
== V4SImode
|| mode
== V8HImode
) && !IN_RANGE (value
, -1, 0)
6329 && EASY_VECTOR_15 (value
))
6332 /* Return # of instructions and the constant byte for XXSPLTIB. */
6333 if (mode
== V16QImode
)
6336 else if (IN_RANGE (value
, -1, 0))
6342 *constant_ptr
= (int) value
;
6347 output_vec_const_move (rtx
*operands
)
6355 mode
= GET_MODE (dest
);
6359 bool dest_vmx_p
= ALTIVEC_REGNO_P (REGNO (dest
));
6360 int xxspltib_value
= 256;
6363 if (zero_constant (vec
, mode
))
6365 if (TARGET_P9_VECTOR
)
6366 return "xxspltib %x0,0";
6368 else if (dest_vmx_p
)
6369 return "vspltisw %0,0";
6372 return "xxlxor %x0,%x0,%x0";
6375 if (all_ones_constant (vec
, mode
))
6377 if (TARGET_P9_VECTOR
)
6378 return "xxspltib %x0,255";
6380 else if (dest_vmx_p
)
6381 return "vspltisw %0,-1";
6383 else if (TARGET_P8_VECTOR
)
6384 return "xxlorc %x0,%x0,%x0";
6390 if (TARGET_P9_VECTOR
6391 && xxspltib_constant_p (vec
, mode
, &num_insns
, &xxspltib_value
))
6395 operands
[2] = GEN_INT (xxspltib_value
& 0xff);
6396 return "xxspltib %x0,%2";
6407 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest
)));
6408 if (zero_constant (vec
, mode
))
6409 return "vspltisw %0,0";
6411 if (all_ones_constant (vec
, mode
))
6412 return "vspltisw %0,-1";
6414 /* Do we need to construct a value using VSLDOI? */
6415 shift
= vspltis_shifted (vec
);
6419 splat_vec
= gen_easy_altivec_constant (vec
);
6420 gcc_assert (GET_CODE (splat_vec
) == VEC_DUPLICATE
);
6421 operands
[1] = XEXP (splat_vec
, 0);
6422 if (!EASY_VECTOR_15 (INTVAL (operands
[1])))
6425 switch (GET_MODE (splat_vec
))
6428 return "vspltisw %0,%1";
6431 return "vspltish %0,%1";
6434 return "vspltisb %0,%1";
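/* Example of the splat path above (illustrative): for the V4SImode
   constant { 5, 5, 5, 5 }, gen_easy_altivec_constant returns
   (vec_duplicate:V4SI (const_int 5)), operands[1] becomes 5, and the
   V4SImode case prints "vspltisw %0,5" with %0 replaced by the
   destination Altivec register.  */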
6444 /* Initialize vector TARGET to VALS. */
6447 rs6000_expand_vector_init (rtx target
, rtx vals
)
6449 machine_mode mode
= GET_MODE (target
);
6450 machine_mode inner_mode
= GET_MODE_INNER (mode
);
6451 int n_elts
= GET_MODE_NUNITS (mode
);
6452 int n_var
= 0, one_var
= -1;
6453 bool all_same
= true, all_const_zero
= true;
6457 for (i
= 0; i
< n_elts
; ++i
)
6459 x
= XVECEXP (vals
, 0, i
);
6460 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6461 ++n_var
, one_var
= i
;
6462 else if (x
!= CONST0_RTX (inner_mode
))
6463 all_const_zero
= false;
6465 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
6471 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
6472 bool int_vector_p
= (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
6473 if ((int_vector_p
|| TARGET_VSX
) && all_const_zero
)
6475 /* Zero register. */
6476 emit_move_insn (target
, CONST0_RTX (mode
));
6479 else if (int_vector_p
&& easy_vector_constant (const_vec
, mode
))
6481 /* Splat immediate. */
6482 emit_insn (gen_rtx_SET (target
, const_vec
));
6487 /* Load from constant pool. */
6488 emit_move_insn (target
, const_vec
);
6493 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6494 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
6498 size_t num_elements
= all_same
? 1 : 2;
6499 for (i
= 0; i
< num_elements
; i
++)
6501 op
[i
] = XVECEXP (vals
, 0, i
);
6502 /* Just in case there is a SUBREG with a smaller mode, do a
6504 if (GET_MODE (op
[i
]) != inner_mode
)
6506 rtx tmp
= gen_reg_rtx (inner_mode
);
6507 convert_move (tmp
, op
[i
], 0);
6510 /* Allow load with splat double word. */
6511 else if (MEM_P (op
[i
]))
6514 op
[i
] = force_reg (inner_mode
, op
[i
]);
6516 else if (!REG_P (op
[i
]))
6517 op
[i
] = force_reg (inner_mode
, op
[i
]);
6522 if (mode
== V2DFmode
)
6523 emit_insn (gen_vsx_splat_v2df (target
, op
[0]));
6525 emit_insn (gen_vsx_splat_v2di (target
, op
[0]));
6529 if (mode
== V2DFmode
)
6530 emit_insn (gen_vsx_concat_v2df (target
, op
[0], op
[1]));
6532 emit_insn (gen_vsx_concat_v2di (target
, op
[0], op
[1]));
6537 /* Special case initializing vector int if we are on 64-bit systems with
6538 direct move or we have the ISA 3.0 instructions. */
6539 if (mode
== V4SImode
&& VECTOR_MEM_VSX_P (V4SImode
)
6540 && TARGET_DIRECT_MOVE_64BIT
)
6544 rtx element0
= XVECEXP (vals
, 0, 0);
6545 if (MEM_P (element0
))
6546 element0
= rs6000_force_indexed_or_indirect_mem (element0
);
6548 element0
= force_reg (SImode
, element0
);
6550 if (TARGET_P9_VECTOR
)
6551 emit_insn (gen_vsx_splat_v4si (target
, element0
));
6554 rtx tmp
= gen_reg_rtx (DImode
);
6555 emit_insn (gen_zero_extendsidi2 (tmp
, element0
));
6556 emit_insn (gen_vsx_splat_v4si_di (target
, tmp
));
6565 for (i
= 0; i
< 4; i
++)
6566 elements
[i
] = force_reg (SImode
, XVECEXP (vals
, 0, i
));
6568 emit_insn (gen_vsx_init_v4si (target
, elements
[0], elements
[1],
6569 elements
[2], elements
[3]));
6574 /* With single precision floating point on VSX, know that internally single
6575 precision is actually represented as a double, and either make 2 V2DF
6576 vectors, and convert these vectors to single precision, or do one
6577 conversion, and splat the result to the other elements. */
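/* Sketch of the double-to-single dance below, assuming the Power8 VMRGEW
   path on a big-endian target: for inputs f0..f3, dbl_even holds { f0, f2 }
   and dbl_odd holds { f1, f3 } as doubles; xvcvdpsp converts each pair,
   leaving the singles in words 0 and 2 of flt_even/flt_odd; vmrgew then
   interleaves the even words of the two results, producing
   { f0, f1, f2, f3 }.  */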
6578 if (mode
== V4SFmode
&& VECTOR_MEM_VSX_P (V4SFmode
))
6582 rtx element0
= XVECEXP (vals
, 0, 0);
6584 if (TARGET_P9_VECTOR
)
6586 if (MEM_P (element0
))
6587 element0
= rs6000_force_indexed_or_indirect_mem (element0
);
6589 emit_insn (gen_vsx_splat_v4sf (target
, element0
));
6594 rtx freg
= gen_reg_rtx (V4SFmode
);
6595 rtx sreg
= force_reg (SFmode
, element0
);
6596 rtx cvt
= (TARGET_XSCVDPSPN
6597 ? gen_vsx_xscvdpspn_scalar (freg
, sreg
)
6598 : gen_vsx_xscvdpsp_scalar (freg
, sreg
));
6601 emit_insn (gen_vsx_xxspltw_v4sf_direct (target
, freg
,
6607 rtx dbl_even
= gen_reg_rtx (V2DFmode
);
6608 rtx dbl_odd
= gen_reg_rtx (V2DFmode
);
6609 rtx flt_even
= gen_reg_rtx (V4SFmode
);
6610 rtx flt_odd
= gen_reg_rtx (V4SFmode
);
6611 rtx op0
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
6612 rtx op1
= force_reg (SFmode
, XVECEXP (vals
, 0, 1));
6613 rtx op2
= force_reg (SFmode
, XVECEXP (vals
, 0, 2));
6614 rtx op3
= force_reg (SFmode
, XVECEXP (vals
, 0, 3));
6616 /* Use VMRGEW if we can instead of doing a permute. */
6617 if (TARGET_P8_VECTOR
)
6619 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op2
));
6620 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op1
, op3
));
6621 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
6622 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
6623 if (BYTES_BIG_ENDIAN
)
6624 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_even
, flt_odd
));
6626 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_odd
, flt_even
));
6630 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op1
));
6631 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op2
, op3
));
6632 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
6633 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
6634 rs6000_expand_extract_even (target
, flt_even
, flt_odd
);
6640 /* Special case initializing vector short/char that are splats if we are on
6641 64-bit systems with direct move. */
6642 if (all_same
&& TARGET_DIRECT_MOVE_64BIT
6643 && (mode
== V16QImode
|| mode
== V8HImode
))
6645 rtx op0
= XVECEXP (vals
, 0, 0);
6646 rtx di_tmp
= gen_reg_rtx (DImode
);
6649 op0
= force_reg (GET_MODE_INNER (mode
), op0
);
6651 if (mode
== V16QImode
)
6653 emit_insn (gen_zero_extendqidi2 (di_tmp
, op0
));
6654 emit_insn (gen_vsx_vspltb_di (target
, di_tmp
));
6658 if (mode
== V8HImode
)
6660 emit_insn (gen_zero_extendhidi2 (di_tmp
, op0
));
6661 emit_insn (gen_vsx_vsplth_di (target
, di_tmp
));
6666 /* Store value to stack temp. Load vector element. Splat. However, splat
6667 of 64-bit items is not supported on Altivec. */
6668 if (all_same
&& GET_MODE_SIZE (inner_mode
) <= 4)
6670 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
6671 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
6672 XVECEXP (vals
, 0, 0));
6673 x
= gen_rtx_UNSPEC (VOIDmode
,
6674 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
6675 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
6677 gen_rtx_SET (target
, mem
),
6679 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
6680 gen_rtx_PARALLEL (VOIDmode
,
6681 gen_rtvec (1, const0_rtx
)));
6682 emit_insn (gen_rtx_SET (target
, gen_rtx_VEC_DUPLICATE (mode
, x
)));
6686 /* One field is non-constant. Load constant then overwrite
6690 rtx copy
= copy_rtx (vals
);
6692 /* Load constant part of vector, substitute neighboring value for
6694 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
6695 rs6000_expand_vector_init (target
, copy
);
6697 /* Insert variable. */
6698 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
6702 /* Construct the vector in memory one field at a time
6703 and load the whole vector. */
6704 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
6705 for (i
= 0; i
< n_elts
; i
++)
6706 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
6707 i
* GET_MODE_SIZE (inner_mode
)),
6708 XVECEXP (vals
, 0, i
));
6709 emit_move_insn (target
, mem
);
6712 /* Set field ELT of TARGET to VAL. */
6715 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
6717 machine_mode mode
= GET_MODE (target
);
6718 machine_mode inner_mode
= GET_MODE_INNER (mode
);
6719 rtx reg
= gen_reg_rtx (mode
);
6721 int width
= GET_MODE_SIZE (inner_mode
);
6724 val
= force_reg (GET_MODE (val
), val
);
6726 if (VECTOR_MEM_VSX_P (mode
))
6728 rtx insn
= NULL_RTX
;
6729 rtx elt_rtx
= GEN_INT (elt
);
6731 if (mode
== V2DFmode
)
6732 insn
= gen_vsx_set_v2df (target
, target
, val
, elt_rtx
);
6734 else if (mode
== V2DImode
)
6735 insn
= gen_vsx_set_v2di (target
, target
, val
, elt_rtx
);
6737 else if (TARGET_P9_VECTOR
&& TARGET_POWERPC64
)
6739 if (mode
== V4SImode
)
6740 insn
= gen_vsx_set_v4si_p9 (target
, target
, val
, elt_rtx
);
6741 else if (mode
== V8HImode
)
6742 insn
= gen_vsx_set_v8hi_p9 (target
, target
, val
, elt_rtx
);
6743 else if (mode
== V16QImode
)
6744 insn
= gen_vsx_set_v16qi_p9 (target
, target
, val
, elt_rtx
);
6745 else if (mode
== V4SFmode
)
6746 insn
= gen_vsx_set_v4sf_p9 (target
, target
, val
, elt_rtx
);
6756 /* Simplify setting single element vectors like V1TImode. */
6757 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (inner_mode
) && elt
== 0)
6759 emit_move_insn (target
, gen_lowpart (mode
, val
));
6763 /* Load single variable value. */
6764 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
6765 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
6766 x
= gen_rtx_UNSPEC (VOIDmode
,
6767 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
6768 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
6770 gen_rtx_SET (reg
, mem
),
6773 /* Linear sequence. */
6774 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
6775 for (i
= 0; i
< 16; ++i
)
6776 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
6778 /* Set permute mask to insert element into target. */
6779 for (i
= 0; i
< width
; ++i
)
6780 XVECEXP (mask
, 0, elt
*width
+ i
)
6781 = GEN_INT (i
+ 0x10);
6782 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
6784 if (BYTES_BIG_ENDIAN
)
6785 x
= gen_rtx_UNSPEC (mode
,
6786 gen_rtvec (3, target
, reg
,
6787 force_reg (V16QImode
, x
)),
6791 if (TARGET_P9_VECTOR
)
6792 x
= gen_rtx_UNSPEC (mode
,
6793 gen_rtvec (3, reg
, target
,
6794 force_reg (V16QImode
, x
)),
6798 /* Invert selector. We prefer to generate VNAND on P8 so
6799 that future fusion opportunities can kick in, but must
6800 generate VNOR elsewhere. */
6801 rtx notx
= gen_rtx_NOT (V16QImode
, force_reg (V16QImode
, x
));
6802 rtx iorx
= (TARGET_P8_VECTOR
6803 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
6804 : gen_rtx_AND (V16QImode
, notx
, notx
));
6805 rtx tmp
= gen_reg_rtx (V16QImode
);
6806 emit_insn (gen_rtx_SET (tmp
, iorx
));
6808 /* Permute with operands reversed and adjusted selector. */
6809 x
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, reg
, target
, tmp
),
6814 emit_insn (gen_rtx_SET (target
, x
));
6817 /* Extract field ELT from VEC into TARGET. */
6820 rs6000_expand_vector_extract (rtx target
, rtx vec
, rtx elt
)
6822 machine_mode mode
= GET_MODE (vec
);
6823 machine_mode inner_mode
= GET_MODE_INNER (mode
);
6826 if (VECTOR_MEM_VSX_P (mode
) && CONST_INT_P (elt
))
6833 gcc_assert (INTVAL (elt
) == 0 && inner_mode
== TImode
);
6834 emit_move_insn (target
, gen_lowpart (TImode
, vec
));
6837 emit_insn (gen_vsx_extract_v2df (target
, vec
, elt
));
6840 emit_insn (gen_vsx_extract_v2di (target
, vec
, elt
));
6843 emit_insn (gen_vsx_extract_v4sf (target
, vec
, elt
));
6846 if (TARGET_DIRECT_MOVE_64BIT
)
6848 emit_insn (gen_vsx_extract_v16qi (target
, vec
, elt
));
6854 if (TARGET_DIRECT_MOVE_64BIT
)
6856 emit_insn (gen_vsx_extract_v8hi (target
, vec
, elt
));
6862 if (TARGET_DIRECT_MOVE_64BIT
)
6864 emit_insn (gen_vsx_extract_v4si (target
, vec
, elt
));
6870 else if (VECTOR_MEM_VSX_P (mode
) && !CONST_INT_P (elt
)
6871 && TARGET_DIRECT_MOVE_64BIT
)
6873 if (GET_MODE (elt
) != DImode
)
6875 rtx tmp
= gen_reg_rtx (DImode
);
6876 convert_move (tmp
, elt
, 0);
6879 else if (!REG_P (elt
))
6880 elt
= force_reg (DImode
, elt
);
6885 emit_insn (gen_vsx_extract_v2df_var (target
, vec
, elt
));
6889 emit_insn (gen_vsx_extract_v2di_var (target
, vec
, elt
));
6893 emit_insn (gen_vsx_extract_v4sf_var (target
, vec
, elt
));
6897 emit_insn (gen_vsx_extract_v4si_var (target
, vec
, elt
));
6901 emit_insn (gen_vsx_extract_v8hi_var (target
, vec
, elt
));
6905 emit_insn (gen_vsx_extract_v16qi_var (target
, vec
, elt
));
6913 gcc_assert (CONST_INT_P (elt
));
6915 /* Allocate mode-sized buffer. */
6916 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
6918 emit_move_insn (mem
, vec
);
6920 /* Add offset to field within buffer matching vector element. */
6921 mem
= adjust_address_nv (mem
, inner_mode
,
6922 INTVAL (elt
) * GET_MODE_SIZE (inner_mode
));
6924 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));
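/* Worked example of the fallback above: extracting element 3 of a V4SImode
   vector spills the whole vector to a 16-byte stack temporary and then
   reloads the SImode word at byte offset 3 * 4 = 12 into TARGET.  */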
/* Helper function to return the register number of a RTX.  */

static inline int
regno_or_subregno (rtx op)
{
  if (REG_P (op))
    return REGNO (op);
  else if (SUBREG_P (op))
    return subreg_regno (op);
  else
    gcc_unreachable ();
}
6939 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6940 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6941 temporary (BASE_TMP) to fixup the address. Return the new memory address
6942 that is valid for reads or writes to a given register (SCALAR_REG). */
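/* Illustrative case (element constant vs. variable): for an SImode scalar
   the element offset is element * 4; with a constant element 2 and a MEM
   address of the form (plus base 16), the new address folds to
   (plus base 24).  With a variable element the code below instead shifts
   the element left by 2 (exact_log2 of the scalar size) into BASE_TMP and
   adds that to the original address.  */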
6945 rs6000_adjust_vec_address (rtx scalar_reg
,
6949 machine_mode scalar_mode
)
6951 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
6952 rtx addr
= XEXP (mem
, 0);
6957 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6958 gcc_assert (GET_RTX_CLASS (GET_CODE (addr
)) != RTX_AUTOINC
);
6960 /* Calculate what we need to add to the address to get the element
6962 if (CONST_INT_P (element
))
6963 element_offset
= GEN_INT (INTVAL (element
) * scalar_size
);
6966 int byte_shift
= exact_log2 (scalar_size
);
6967 gcc_assert (byte_shift
>= 0);
6969 if (byte_shift
== 0)
6970 element_offset
= element
;
6974 if (TARGET_POWERPC64
)
6975 emit_insn (gen_ashldi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
6977 emit_insn (gen_ashlsi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
6979 element_offset
= base_tmp
;
6983 /* Create the new address pointing to the element within the vector. If we
6984 are adding 0, we don't have to change the address. */
6985 if (element_offset
== const0_rtx
)
6988 /* A simple indirect address can be converted into a reg + offset
6990 else if (REG_P (addr
) || SUBREG_P (addr
))
6991 new_addr
= gen_rtx_PLUS (Pmode
, addr
, element_offset
);
6993 /* Optimize D-FORM addresses with constant offset with a constant element, to
6994 include the element offset in the address directly. */
6995 else if (GET_CODE (addr
) == PLUS
)
6997 rtx op0
= XEXP (addr
, 0);
6998 rtx op1
= XEXP (addr
, 1);
7001 gcc_assert (REG_P (op0
) || SUBREG_P (op0
));
7002 if (CONST_INT_P (op1
) && CONST_INT_P (element_offset
))
7004 HOST_WIDE_INT offset
= INTVAL (op1
) + INTVAL (element_offset
);
7005 rtx offset_rtx
= GEN_INT (offset
);
7007 if (IN_RANGE (offset
, -32768, 32767)
7008 && (scalar_size
< 8 || (offset
& 0x3) == 0))
7009 new_addr
= gen_rtx_PLUS (Pmode
, op0
, offset_rtx
);
7012 emit_move_insn (base_tmp
, offset_rtx
);
7013 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7018 bool op1_reg_p
= (REG_P (op1
) || SUBREG_P (op1
));
7019 bool ele_reg_p
= (REG_P (element_offset
) || SUBREG_P (element_offset
));
7021 /* Note, ADDI requires the register being added to be a base
7022 register. If the register was R0, load it up into the temporary
7025 && (ele_reg_p
|| reg_or_subregno (op1
) != FIRST_GPR_REGNO
))
7027 insn
= gen_add3_insn (base_tmp
, op1
, element_offset
);
7028 gcc_assert (insn
!= NULL_RTX
);
7033 && reg_or_subregno (element_offset
) != FIRST_GPR_REGNO
)
7035 insn
= gen_add3_insn (base_tmp
, element_offset
, op1
);
7036 gcc_assert (insn
!= NULL_RTX
);
7042 emit_move_insn (base_tmp
, op1
);
7043 emit_insn (gen_add2_insn (base_tmp
, element_offset
));
7046 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7052 emit_move_insn (base_tmp
, addr
);
7053 new_addr
= gen_rtx_PLUS (Pmode
, base_tmp
, element_offset
);
7056 /* If we have a PLUS, we need to see whether the particular register class
7057 allows for D-FORM or X-FORM addressing. */
7058 if (GET_CODE (new_addr
) == PLUS
)
7060 rtx op1
= XEXP (new_addr
, 1);
7061 addr_mask_type addr_mask
;
7062 int scalar_regno
= regno_or_subregno (scalar_reg
);
7064 gcc_assert (scalar_regno
< FIRST_PSEUDO_REGISTER
);
7065 if (INT_REGNO_P (scalar_regno
))
7066 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_GPR
];
7068 else if (FP_REGNO_P (scalar_regno
))
7069 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_FPR
];
7071 else if (ALTIVEC_REGNO_P (scalar_regno
))
7072 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_VMX
];
7077 if (REG_P (op1
) || SUBREG_P (op1
))
7078 valid_addr_p
= (addr_mask
& RELOAD_REG_INDEXED
) != 0;
7080 valid_addr_p
= (addr_mask
& RELOAD_REG_OFFSET
) != 0;
7083 else if (REG_P (new_addr
) || SUBREG_P (new_addr
))
7084 valid_addr_p
= true;
7087 valid_addr_p
= false;
7091 emit_move_insn (base_tmp
, new_addr
);
7092 new_addr
= base_tmp
;
7095 return change_address (mem
, scalar_mode
, new_addr
);
7098 /* Split a variable vec_extract operation into the component instructions. */
7101 rs6000_split_vec_extract_var (rtx dest
, rtx src
, rtx element
, rtx tmp_gpr
,
7104 machine_mode mode
= GET_MODE (src
);
7105 machine_mode scalar_mode
= GET_MODE (dest
);
7106 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7107 int byte_shift
= exact_log2 (scalar_size
);
7109 gcc_assert (byte_shift
>= 0);
7111 /* If we are given a memory address, optimize to load just the element. We
7112 don't have to adjust the vector element number on little endian
7116 gcc_assert (REG_P (tmp_gpr
));
7117 emit_move_insn (dest
, rs6000_adjust_vec_address (dest
, src
, element
,
7118 tmp_gpr
, scalar_mode
));
7122 else if (REG_P (src
) || SUBREG_P (src
))
7124 int bit_shift
= byte_shift
+ 3;
7126 int dest_regno
= regno_or_subregno (dest
);
7127 int src_regno
= regno_or_subregno (src
);
7128 int element_regno
= regno_or_subregno (element
);
7130 gcc_assert (REG_P (tmp_gpr
));
7132 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7133 a general purpose register. */
7134 if (TARGET_P9_VECTOR
7135 && (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
7136 && INT_REGNO_P (dest_regno
)
7137 && ALTIVEC_REGNO_P (src_regno
)
7138 && INT_REGNO_P (element_regno
))
7140 rtx dest_si
= gen_rtx_REG (SImode
, dest_regno
);
7141 rtx element_si
= gen_rtx_REG (SImode
, element_regno
);
7143 if (mode
== V16QImode
)
7144 emit_insn (BYTES_BIG_ENDIAN
7145 ? gen_vextublx (dest_si
, element_si
, src
)
7146 : gen_vextubrx (dest_si
, element_si
, src
));
7148 else if (mode
== V8HImode
)
7150 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7151 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const1_rtx
));
7152 emit_insn (BYTES_BIG_ENDIAN
7153 ? gen_vextuhlx (dest_si
, tmp_gpr_si
, src
)
7154 : gen_vextuhrx (dest_si
, tmp_gpr_si
, src
));
7160 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7161 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const2_rtx
));
7162 emit_insn (BYTES_BIG_ENDIAN
7163 ? gen_vextuwlx (dest_si
, tmp_gpr_si
, src
)
7164 : gen_vextuwrx (dest_si
, tmp_gpr_si
, src
));
7171 gcc_assert (REG_P (tmp_altivec
));
7173 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7174 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7175 will shift the element into the upper position (adding 3 to convert a
7176 byte shift into a bit shift). */
7177 if (scalar_size
== 8)
7179 if (!BYTES_BIG_ENDIAN
)
7181 emit_insn (gen_xordi3 (tmp_gpr
, element
, const1_rtx
));
7187 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7189 emit_insn (gen_rtx_SET (tmp_gpr
,
7190 gen_rtx_AND (DImode
,
7191 gen_rtx_ASHIFT (DImode
,
7198 if (!BYTES_BIG_ENDIAN
)
7200 rtx num_ele_m1
= GEN_INT (GET_MODE_NUNITS (mode
) - 1);
7202 emit_insn (gen_anddi3 (tmp_gpr
, element
, num_ele_m1
));
7203 emit_insn (gen_subdi3 (tmp_gpr
, num_ele_m1
, tmp_gpr
));
7209 emit_insn (gen_ashldi3 (tmp_gpr
, element2
, GEN_INT (bit_shift
)));
7212 /* Get the value into the lower byte of the Altivec register where VSLO
7214 if (TARGET_P9_VECTOR
)
7215 emit_insn (gen_vsx_splat_v2di (tmp_altivec
, tmp_gpr
));
7216 else if (can_create_pseudo_p ())
7217 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_gpr
, tmp_gpr
));
7220 rtx tmp_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7221 emit_move_insn (tmp_di
, tmp_gpr
);
7222 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_di
, tmp_di
));
7225 /* Do the VSLO to get the value into the final location. */
7229 emit_insn (gen_vsx_vslo_v2df (dest
, src
, tmp_altivec
));
7233 emit_insn (gen_vsx_vslo_v2di (dest
, src
, tmp_altivec
));
7238 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7239 rtx tmp_altivec_v4sf
= gen_rtx_REG (V4SFmode
, REGNO (tmp_altivec
));
7240 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7241 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7244 emit_insn (gen_vsx_xscvspdp_scalar2 (dest
, tmp_altivec_v4sf
));
7252 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7253 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7254 rtx tmp_gpr_di
= gen_rtx_REG (DImode
, REGNO (dest
));
7255 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7257 emit_move_insn (tmp_gpr_di
, tmp_altivec_di
);
7258 emit_insn (gen_ashrdi3 (tmp_gpr_di
, tmp_gpr_di
,
7259 GEN_INT (64 - (8 * scalar_size
))));
/* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
   selects whether the alignment is abi mandated, optional, or
   both abi and optional alignment.  */

unsigned int
rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
{
  if (how != align_opt)
    {
      if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
	align = 128;
    }

  if (how != align_abi)
    {
      if (TREE_CODE (type) == ARRAY_TYPE
	  && TYPE_MODE (TREE_TYPE (type)) == QImode)
	{
	  if (align < BITS_PER_WORD)
	    align = BITS_PER_WORD;
	}
    }

  return align;
}
/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  Altivec vector memory
   instructions simply ignore the low bits; VSX memory instructions
   are aligned to 4 or 8 bytes.  */

static bool
rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
{
  return (STRICT_ALIGNMENT
	  || (!TARGET_EFFICIENT_UNALIGNED_VSX
	      && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
		  || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
		      && (int) align < VECTOR_ALIGN (mode)))));
}
7313 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7316 rs6000_special_adjust_field_align_p (tree type
, unsigned int computed
)
7318 if (TARGET_ALTIVEC
&& TREE_CODE (type
) == VECTOR_TYPE
)
7320 if (computed
!= 128)
7323 if (!warned
&& warn_psabi
)
7326 inform (input_location
,
7327 "the layout of aggregates containing vectors with"
7328 " %d-byte alignment has changed in GCC 5",
7329 computed
/ BITS_PER_UNIT
);
7332 /* In current GCC there is no special case. */
7339 /* AIX increases natural record alignment to doubleword if the first
7340 field is an FP double while the FP fields remain word aligned. */
7343 rs6000_special_round_type_align (tree type
, unsigned int computed
,
7344 unsigned int specified
)
7346 unsigned int align
= MAX (computed
, specified
);
7347 tree field
= TYPE_FIELDS (type
);
7349 /* Skip all non field decls */
7350 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
7351 field
= DECL_CHAIN (field
);
7353 if (field
!= NULL
&& field
!= type
)
7355 type
= TREE_TYPE (field
);
7356 while (TREE_CODE (type
) == ARRAY_TYPE
)
7357 type
= TREE_TYPE (type
);
7359 if (type
!= error_mark_node
&& TYPE_MODE (type
) == DFmode
)
7360 align
= MAX (align
, 64);
7366 /* Darwin increases record alignment to the natural alignment of
7370 darwin_rs6000_special_round_type_align (tree type
, unsigned int computed
,
7371 unsigned int specified
)
7373 unsigned int align
= MAX (computed
, specified
);
7375 if (TYPE_PACKED (type
))
7378 /* Find the first field, looking down into aggregates. */
7380 tree field
= TYPE_FIELDS (type
);
7381 /* Skip all non field decls */
7382 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
7383 field
= DECL_CHAIN (field
);
7386 /* A packed field does not contribute any extra alignment. */
7387 if (DECL_PACKED (field
))
7389 type
= TREE_TYPE (field
);
7390 while (TREE_CODE (type
) == ARRAY_TYPE
)
7391 type
= TREE_TYPE (type
);
7392 } while (AGGREGATE_TYPE_P (type
));
7394 if (! AGGREGATE_TYPE_P (type
) && type
!= error_mark_node
)
7395 align
= MAX (align
, TYPE_ALIGN (type
));
7400 /* Return 1 for an operand in small memory on V.4/eabi. */
7403 small_data_operand (rtx op ATTRIBUTE_UNUSED
,
7404 machine_mode mode ATTRIBUTE_UNUSED
)
7409 if (rs6000_sdata
== SDATA_NONE
|| rs6000_sdata
== SDATA_DATA
)
7412 if (DEFAULT_ABI
!= ABI_V4
)
7415 if (GET_CODE (op
) == SYMBOL_REF
)
7418 else if (GET_CODE (op
) != CONST
7419 || GET_CODE (XEXP (op
, 0)) != PLUS
7420 || GET_CODE (XEXP (XEXP (op
, 0), 0)) != SYMBOL_REF
7421 || GET_CODE (XEXP (XEXP (op
, 0), 1)) != CONST_INT
)
7426 rtx sum
= XEXP (op
, 0);
7427 HOST_WIDE_INT summand
;
7429 /* We have to be careful here, because it is the referenced address
7430 that must be 32k from _SDA_BASE_, not just the symbol. */
7431 summand
= INTVAL (XEXP (sum
, 1));
7432 if (summand
< 0 || summand
> g_switch_value
)
7435 sym_ref
= XEXP (sum
, 0);
7438 return SYMBOL_REF_SMALL_P (sym_ref
);
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
	return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
	return true;
    }

  return false;
}
/* Return true if the OFFSET is valid for the quad address instructions that
   use d-form (register + offset) addressing.  */

static inline bool
quad_address_offset_p (HOST_WIDE_INT offset)
{
  return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
}
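/* Examples: an offset of 48 is accepted (within the signed 16-bit range
   and a multiple of 16), while 32760 is rejected because its low four
   bits are nonzero and 65536 is rejected because it does not fit in the
   16-bit displacement field.  */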
/* Return true if the ADDR is an acceptable address for a quad memory
   operation of mode MODE (either LQ/STQ for general purpose registers, or
   LXV/STXV for vector registers under ISA 3.0).  STRICT requests the
   strict form of register validity checks on the base register.  */

bool
quad_address_p (rtx addr, machine_mode mode, bool strict)
{
  rtx op0, op1;

  if (GET_MODE_SIZE (mode) != 16)
    return false;

  if (legitimate_indirect_address_p (addr, strict))
    return true;

  if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
    return false;

  if (GET_CODE (addr) != PLUS)
    return false;

  op0 = XEXP (addr, 0);
  if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
    return false;

  op1 = XEXP (addr, 1);
  if (!CONST_INT_P (op1))
    return false;

  return quad_address_offset_p (INTVAL (op1));
}
7530 /* Return true if this is a load or store quad operation. This function does
7531 not handle the atomic quad memory instructions. */
7534 quad_load_store_p (rtx op0
, rtx op1
)
7538 if (!TARGET_QUAD_MEMORY
)
7541 else if (REG_P (op0
) && MEM_P (op1
))
7542 ret
= (quad_int_reg_operand (op0
, GET_MODE (op0
))
7543 && quad_memory_operand (op1
, GET_MODE (op1
))
7544 && !reg_overlap_mentioned_p (op0
, op1
));
7546 else if (MEM_P (op0
) && REG_P (op1
))
7547 ret
= (quad_memory_operand (op0
, GET_MODE (op0
))
7548 && quad_int_reg_operand (op1
, GET_MODE (op1
)));
7553 if (TARGET_DEBUG_ADDR
)
7555 fprintf (stderr
, "\n========== quad_load_store, return %s\n",
7556 ret
? "true" : "false");
7557 debug_rtx (gen_rtx_SET (op0
, op1
));
/* Given an address, return a constant offset term if one exists.  */

rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return 0;
}
7587 /* Return true if the MEM operand is a memory operand suitable for use
7588 with a (full width, possibly multiple) gpr load/store. On
7589 powerpc64 this means the offset must be divisible by 4.
7590 Implements 'Y' constraint.
7592 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7593 a constraint function we know the operand has satisfied a suitable
7594 memory predicate. Also accept some odd rtl generated by reload
7595 (see rs6000_legitimize_reload_address for various forms). It is
7596 important that reload rtl be accepted by appropriate constraints
7597 but not by the operand predicate.
7599 Offsetting a lo_sum should not be allowed, except where we know by
7600 alignment that a 32k boundary is not crossed, but see the ???
7601 comment in rs6000_legitimize_reload_address. Note that by
7602 "offsetting" here we mean a further offset to access parts of the
7603 MEM. It's fine to have a lo_sum where the inner address is offset
7604 from a sym, since the same sym+offset will appear in the high part
7605 of the address calculation. */
7608 mem_operand_gpr (rtx op
, machine_mode mode
)
7610 unsigned HOST_WIDE_INT offset
;
7612 rtx addr
= XEXP (op
, 0);
7614 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7616 && (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
7617 && mode_supports_pre_incdec_p (mode
)
7618 && legitimate_indirect_address_p (XEXP (addr
, 0), false))
7621 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7622 if (!rs6000_offsettable_memref_p (op
, mode
, false))
7625 op
= address_offset (addr
);
7629 offset
= INTVAL (op
);
7630 if (TARGET_POWERPC64
&& (offset
& 3) != 0)
7633 extra
= GET_MODE_SIZE (mode
) - UNITS_PER_WORD
;
7637 if (GET_CODE (addr
) == LO_SUM
)
7638 /* For lo_sum addresses, we must allow any offset except one that
7639 causes a wrap, so test only the low 16 bits. */
7640 offset
= ((offset
& 0xffff) ^ 0x8000) - 0x8000;
7642 return offset
+ 0x8000 < 0x10000u
- extra
;
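/* Worked example of the range check above: for a TImode access on a
   64-bit target, extra = 16 - 8 = 8, so an offset of 32752 is accepted
   (32752 + 0x8000 = 65520 < 65528) but 32760 is rejected, since the
   second doubleword would land at 32768, outside the 16-bit displacement
   range.  */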
7645 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7646 enforce an offset divisible by 4 even for 32-bit. */
7649 mem_operand_ds_form (rtx op
, machine_mode mode
)
7651 unsigned HOST_WIDE_INT offset
;
7653 rtx addr
= XEXP (op
, 0);
7655 if (!offsettable_address_p (false, mode
, addr
))
7658 op
= address_offset (addr
);
7662 offset
= INTVAL (op
);
7663 if ((offset
& 3) != 0)
7666 extra
= GET_MODE_SIZE (mode
) - UNITS_PER_WORD
;
7670 if (GET_CODE (addr
) == LO_SUM
)
7671 /* For lo_sum addresses, we must allow any offset except one that
7672 causes a wrap, so test only the low 16 bits. */
7673 offset
= ((offset
& 0xffff) ^ 0x8000) - 0x8000;
7675 return offset
+ 0x8000 < 0x10000u
- extra
;
7678 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7681 reg_offset_addressing_ok_p (machine_mode mode
)
7695 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7696 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7697 a vector mode, if we want to use the VSX registers to move it around,
7698 we need to restrict ourselves to reg+reg addressing. Similarly for
7699 IEEE 128-bit floating point that is passed in a single vector
7701 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
))
7702 return mode_supports_dq_form (mode
);
7706 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7707 addressing for the LFIWZX and STFIWX instructions. */
7708 if (TARGET_NO_SDMODE_STACK
)
7720 virtual_stack_registers_memory_p (rtx op
)
7724 if (GET_CODE (op
) == REG
)
7725 regnum
= REGNO (op
);
7727 else if (GET_CODE (op
) == PLUS
7728 && GET_CODE (XEXP (op
, 0)) == REG
7729 && GET_CODE (XEXP (op
, 1)) == CONST_INT
)
7730 regnum
= REGNO (XEXP (op
, 0));
7735 return (regnum
>= FIRST_VIRTUAL_REGISTER
7736 && regnum
<= LAST_VIRTUAL_POINTER_REGISTER
);
7739 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7740 is known to not straddle a 32k boundary. This function is used
7741 to determine whether -mcmodel=medium code can use TOC pointer
7742 relative addressing for OP. This means the alignment of the TOC
7743 pointer must also be taken into account, and unfortunately that is
7746 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7747 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7751 offsettable_ok_by_alignment (rtx op
, HOST_WIDE_INT offset
,
7755 unsigned HOST_WIDE_INT dsize
, dalign
, lsb
, mask
;
7757 if (GET_CODE (op
) != SYMBOL_REF
)
7760 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7762 if (mode_supports_dq_form (mode
))
7765 dsize
= GET_MODE_SIZE (mode
);
7766 decl
= SYMBOL_REF_DECL (op
);
7772 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7773 replacing memory addresses with an anchor plus offset. We
7774 could find the decl by rummaging around in the block->objects
7775 VEC for the given offset but that seems like too much work. */
7776 dalign
= BITS_PER_UNIT
;
7777 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op
)
7778 && SYMBOL_REF_ANCHOR_P (op
)
7779 && SYMBOL_REF_BLOCK (op
) != NULL
)
7781 struct object_block
*block
= SYMBOL_REF_BLOCK (op
);
7783 dalign
= block
->alignment
;
7784 offset
+= SYMBOL_REF_BLOCK_OFFSET (op
);
7786 else if (CONSTANT_POOL_ADDRESS_P (op
))
7788 /* It would be nice to have get_pool_align().. */
7789 machine_mode cmode
= get_pool_mode (op
);
7791 dalign
= GET_MODE_ALIGNMENT (cmode
);
7794 else if (DECL_P (decl
))
7796 dalign
= DECL_ALIGN (decl
);
7800 /* Allow BLKmode when the entire object is known to not
7801 cross a 32k boundary. */
7802 if (!DECL_SIZE_UNIT (decl
))
7805 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl
)))
7808 dsize
= tree_to_uhwi (DECL_SIZE_UNIT (decl
));
7812 dalign
/= BITS_PER_UNIT
;
7813 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
7814 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
7815 return dalign
>= dsize
;
7821 /* Find how many bits of the alignment we know for this access. */
7822 dalign
/= BITS_PER_UNIT
;
7823 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
7824 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
7826 lsb
= offset
& -offset
;
7830 return dalign
>= dsize
;
7834 constant_pool_expr_p (rtx op
)
7838 split_const (op
, &base
, &offset
);
7839 return (GET_CODE (base
) == SYMBOL_REF
7840 && CONSTANT_POOL_ADDRESS_P (base
)
7841 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base
), Pmode
));
7844 /* These are only used to pass through from print_operand/print_operand_address
7845 to rs6000_output_addr_const_extra over the intervening function
7846 output_addr_const which is not target code. */
7847 static const_rtx tocrel_base_oac
, tocrel_offset_oac
;
/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match non-split
   -mcmodel=large/medium toc pointer relative addresses.  If the pointers
   are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
   TOCREL_OFFSET_RET respectively.  */

toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
		     const_rtx *tocrel_offset_ret)
{
  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* When strict ensure we have everything tidy.  */
      if (strict
	  && !(GET_CODE (op) == LO_SUM
	       && REG_P (XEXP (op, 0))
	       && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
	return false;

      /* When not strict, allow non-split TOC addresses and also allow
	 (lo_sum (high ..)) TOC addresses created during reload.  */
      if (GET_CODE (op) == LO_SUM)
	op = XEXP (op, 1);
    }

  const_rtx tocrel_base = op;
  const_rtx tocrel_offset = const0_rtx;

  if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  if (tocrel_base_ret)
    *tocrel_base_ret = tocrel_base;
  if (tocrel_offset_ret)
    *tocrel_offset_ret = tocrel_offset;

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL
	  && REG_P (XVECEXP (tocrel_base, 0, 1))
	  && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
}
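/* Illustrative sketch (assuming the usual create_TOC_reference output): a
   TOC-relative address for symbol "sym" typically has the shape

     (plus (unspec [(symbol_ref ("sym")) (reg 2)] UNSPEC_TOCREL)
	   (const_int OFFSET))

   and the checks above accept exactly the UNSPEC_TOCREL base whose
   operand 1 is the TOC register.  */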
/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
				    bool strict)
{
  const_rtx tocrel_base, tocrel_offset;
  return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
					      INTVAL (tocrel_offset), mode)));
}
legitimate_small_data_p (machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (mode_supports_dq_form (mode))
    return quad_address_p (x, mode, strict);
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));

  /* If we are using VSX scalar loads, restrict ourselves to reg+reg
     addressing.  */
  if (VECTOR_MEM_VSX_P (mode))
    return false;

  if (!TARGET_POWERPC64)
  else if (offset & 3)

  if (!TARGET_POWERPC64)
  else if (offset & 3)

  return offset < 0x10000 - extra;
}
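/* Illustrative note (not in the original text): EXTRA accounts for the bytes
   beyond the first word of a multi-register access (e.g. a DImode value held
   in two 32-bit GPRs), so the final range test above rejects offsets whose
   last word would fall outside the displacement reachable from the base
   register by a d-form load or store.  */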
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}

avoiding_indexed_address_p (machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}

legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
macho_lo_sum_memory_operand (rtx x, machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* quad word addresses are restricted, and we can't use LO_SUM.  */
  if (mode_supports_dq_form (mode))
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      bool large_toc_ok;

      if (DEFAULT_ABI == ABI_V4 && flag_pic)
	return false;
      /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
	 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
	 recognizes some LO_SUM addresses as valid although this
	 function says opposite.  In most cases, LRA through different
	 transformations can generate correct code for address reloads.
	 It can not manage only some LO_SUM cases.  So we need to add
	 code analogous to one in rs6000_legitimize_reload_address for
	 LOW_SUM here saying that some addresses are still valid.  */
      large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
		      && small_toc_ref (x, VOIDmode));
      if (TARGET_TOC && ! large_toc_ok)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x) || large_toc_ok;
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */
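/* Worked example (illustrative): legitimizing (plus (reg) (const_int 0x12345))
   below splits the constant as
     low_int  = ((0x12345 & 0xffff) ^ 0x8000) - 0x8000 = 0x2345
     high_int = 0x12345 - 0x2345 = 0x10000
   so the high part is added into a register (addis) and the remaining
   0x2345 fits in the signed 16-bit displacement of the memory insn.  */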
8106 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
8111 if (!reg_offset_addressing_ok_p (mode
)
8112 || mode_supports_dq_form (mode
))
8114 if (virtual_stack_registers_memory_p (x
))
8117 /* In theory we should not be seeing addresses of the form reg+0,
8118 but just in case it is generated, optimize it away. */
8119 if (GET_CODE (x
) == PLUS
&& XEXP (x
, 1) == const0_rtx
)
8120 return force_reg (Pmode
, XEXP (x
, 0));
8122 /* For TImode with load/store quad, restrict addresses to just a single
8123 pointer, so it works with both GPRs and VSX registers. */
8124 /* Make sure both operands are registers. */
8125 else if (GET_CODE (x
) == PLUS
8126 && (mode
!= TImode
|| !TARGET_VSX
))
8127 return gen_rtx_PLUS (Pmode
,
8128 force_reg (Pmode
, XEXP (x
, 0)),
8129 force_reg (Pmode
, XEXP (x
, 1)));
8131 return force_reg (Pmode
, x
);
8133 if (GET_CODE (x
) == SYMBOL_REF
)
8135 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
8137 return rs6000_legitimize_tls_address (x
, model
);
8149 /* As in legitimate_offset_address_p we do not assume
8150 worst-case. The mode here is just a hint as to the registers
8151 used. A TImode is usually in gprs, but may actually be in
8152 fprs. Leave worst-case scenario for reload to handle via
8153 insn constraints. PTImode is only GPRs. */
8160 if (GET_CODE (x
) == PLUS
8161 && GET_CODE (XEXP (x
, 0)) == REG
8162 && GET_CODE (XEXP (x
, 1)) == CONST_INT
8163 && ((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000)
8164 >= 0x10000 - extra
))
8166 HOST_WIDE_INT high_int
, low_int
;
8168 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8169 if (low_int
>= 0x8000 - extra
)
8171 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
8172 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8173 GEN_INT (high_int
)), 0);
8174 return plus_constant (Pmode
, sum
, low_int
);
8176 else if (GET_CODE (x
) == PLUS
8177 && GET_CODE (XEXP (x
, 0)) == REG
8178 && GET_CODE (XEXP (x
, 1)) != CONST_INT
8179 && GET_MODE_NUNITS (mode
) == 1
8180 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8181 || (/* ??? Assume floating point reg based on mode? */
8182 TARGET_HARD_FLOAT
&& (mode
== DFmode
|| mode
== DDmode
)))
8183 && !avoiding_indexed_address_p (mode
))
8185 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8186 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
8188 else if ((TARGET_ELF
8190 || !MACHO_DYNAMIC_NO_PIC_P
8196 && GET_CODE (x
) != CONST_INT
8197 && GET_CODE (x
) != CONST_WIDE_INT
8198 && GET_CODE (x
) != CONST_DOUBLE
8200 && GET_MODE_NUNITS (mode
) == 1
8201 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8202 || (/* ??? Assume floating point reg based on mode? */
8203 TARGET_HARD_FLOAT
&& (mode
== DFmode
|| mode
== DDmode
))))
8205 rtx reg
= gen_reg_rtx (Pmode
);
8207 emit_insn (gen_elf_high (reg
, x
));
8209 emit_insn (gen_macho_high (reg
, x
));
8210 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
8213 && GET_CODE (x
) == SYMBOL_REF
8214 && constant_pool_expr_p (x
)
8215 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
8216 return create_TOC_reference (x
, NULL_RTX
);
8221 /* Debug version of rs6000_legitimize_address. */
8223 rs6000_debug_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
)
8229 ret
= rs6000_legitimize_address (x
, oldx
, mode
);
8230 insns
= get_insns ();
8236 "\nrs6000_legitimize_address: mode %s, old code %s, "
8237 "new code %s, modified\n",
8238 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)),
8239 GET_RTX_NAME (GET_CODE (ret
)));
8241 fprintf (stderr
, "Original address:\n");
8244 fprintf (stderr
, "oldx:\n");
8247 fprintf (stderr
, "New address:\n");
8252 fprintf (stderr
, "Insns added:\n");
8253 debug_rtx_list (insns
, 20);
8259 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8260 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)));
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8276 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
8281 fputs ("\t.long\t", file
);
8284 fputs (DOUBLE_INT_ASM_OP
, file
);
8289 output_addr_const (file
, x
);
8291 fputs ("@dtprel+0x8000", file
);
8292 else if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
)
8294 switch (SYMBOL_REF_TLS_MODEL (x
))
8298 case TLS_MODEL_LOCAL_EXEC
:
8299 fputs ("@le", file
);
8301 case TLS_MODEL_INITIAL_EXEC
:
8302 fputs ("@ie", file
);
8304 case TLS_MODEL_GLOBAL_DYNAMIC
:
8305 case TLS_MODEL_LOCAL_DYNAMIC
:
/* Return true if X is a symbol that refers to real (rather than emulated)
   thread local storage.  */

rs6000_real_tls_symbol_ref_p (rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */
8329 rs6000_delegitimize_address (rtx orig_x
)
8333 orig_x
= delegitimize_mem_from_attrs (orig_x
);
8339 if (TARGET_CMODEL
!= CMODEL_SMALL
8340 && GET_CODE (y
) == LO_SUM
)
8344 if (GET_CODE (y
) == PLUS
8345 && GET_MODE (y
) == Pmode
8346 && CONST_INT_P (XEXP (y
, 1)))
8348 offset
= XEXP (y
, 1);
8352 if (GET_CODE (y
) == UNSPEC
8353 && XINT (y
, 1) == UNSPEC_TOCREL
)
8355 y
= XVECEXP (y
, 0, 0);
8358 /* Do not associate thread-local symbols with the original
8359 constant pool symbol. */
8361 && GET_CODE (y
) == SYMBOL_REF
8362 && CONSTANT_POOL_ADDRESS_P (y
)
8363 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y
)))
8367 if (offset
!= NULL_RTX
)
8368 y
= gen_rtx_PLUS (Pmode
, y
, offset
);
8369 if (!MEM_P (orig_x
))
8372 return replace_equiv_address_nv (orig_x
, y
);
8376 && GET_CODE (orig_x
) == LO_SUM
8377 && GET_CODE (XEXP (orig_x
, 1)) == CONST
)
8379 y
= XEXP (XEXP (orig_x
, 1), 0);
8380 if (GET_CODE (y
) == UNSPEC
8381 && XINT (y
, 1) == UNSPEC_MACHOPIC_OFFSET
)
8382 return XVECEXP (y
, 0, 0);
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */
8393 rs6000_const_not_ok_for_debug_p (rtx x
)
8395 if (GET_CODE (x
) == UNSPEC
)
8397 if (GET_CODE (x
) == SYMBOL_REF
8398 && CONSTANT_POOL_ADDRESS_P (x
))
8400 rtx c
= get_pool_constant (x
);
8401 machine_mode cmode
= get_pool_mode (x
);
8402 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c
, cmode
))
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

rs6000_legitimate_combined_insn (rtx_insn *insn)
{
  int icode = INSN_CODE (insn);

  /* Reject creating doloop insns.  Combine should not be allowed
     to create these for a number of reasons:
     1) In a nested loop, if combine creates one of these in an
     outer loop and the register allocator happens to allocate ctr
     to the outer loop insn, then the inner loop can't use ctr.
     Inner loops ought to be more highly optimized.
     2) Combine often wants to create one of these from what was
     originally a three insn sequence, first combining the three
     insns to two, then to ctrsi/ctrdi.  When ctrsi/ctrdi is not
     allocated ctr, the splitter takes us back to the three insn
     sequence.  It's better to stop combine at the two insn
     sequence.
     3) Faced with not being able to allocate ctr for ctrsi/ctrdi
     insns, the register allocator sometimes uses floating point
     or vector registers for the pseudo.  Since ctrsi/ctrdi is a
     jump insn and output reloads are not implemented for jumps,
     the ctrsi/ctrdi splitters need to handle all possible cases.
     That's a pain, and it gets to be seriously difficult when a
     splitter that runs after reload needs memory to transfer from
     a gpr to fpr.  See PR70098 and PR71763 which are not fixed
     for the difficult case.  It's better to not create problems
     in the first place.  */
  if (icode != CODE_FOR_nothing
      && (icode == CODE_FOR_bdz_si
	  || icode == CODE_FOR_bdz_di
	  || icode == CODE_FOR_bdnz_si
	  || icode == CODE_FOR_bdnz_di
	  || icode == CODE_FOR_bdztf_si
	  || icode == CODE_FOR_bdztf_di
	  || icode == CODE_FOR_bdnztf_si
	  || icode == CODE_FOR_bdnztf_di))
    return false;

  return true;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;

rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;

rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
8480 /* AIX Thread-Local Address support. */
8483 rs6000_legitimize_tls_address_aix (rtx addr
, enum tls_model model
)
8485 rtx sym
, mem
, tocref
, tlsreg
, tmpreg
, dest
, tlsaddr
;
8489 name
= XSTR (addr
, 0);
8490 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8491 or the symbol will be in TLS private data section. */
8492 if (name
[strlen (name
) - 1] != ']'
8493 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr
))
8494 || bss_initializer_p (SYMBOL_REF_DECL (addr
))))
8496 tlsname
= XALLOCAVEC (char, strlen (name
) + 4);
8497 strcpy (tlsname
, name
);
8499 bss_initializer_p (SYMBOL_REF_DECL (addr
)) ? "[UL]" : "[TL]");
8500 tlsaddr
= copy_rtx (addr
);
8501 XSTR (tlsaddr
, 0) = ggc_strdup (tlsname
);
8506 /* Place addr into TOC constant pool. */
8507 sym
= force_const_mem (GET_MODE (tlsaddr
), tlsaddr
);
8509 /* Output the TOC entry and create the MEM referencing the value. */
8510 if (constant_pool_expr_p (XEXP (sym
, 0))
8511 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym
, 0)), Pmode
))
8513 tocref
= create_TOC_reference (XEXP (sym
, 0), NULL_RTX
);
8514 mem
= gen_const_mem (Pmode
, tocref
);
8515 set_mem_alias_set (mem
, get_TOC_alias_set ());
8520 /* Use global-dynamic for local-dynamic. */
8521 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
8522 || model
== TLS_MODEL_LOCAL_DYNAMIC
)
8524 /* Create new TOC reference for @m symbol. */
8525 name
= XSTR (XVECEXP (XEXP (mem
, 0), 0, 0), 0);
8526 tlsname
= XALLOCAVEC (char, strlen (name
) + 1);
8527 strcpy (tlsname
, "*LCM");
8528 strcat (tlsname
, name
+ 3);
8529 rtx modaddr
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (tlsname
));
8530 SYMBOL_REF_FLAGS (modaddr
) |= SYMBOL_FLAG_LOCAL
;
8531 tocref
= create_TOC_reference (modaddr
, NULL_RTX
);
8532 rtx modmem
= gen_const_mem (Pmode
, tocref
);
8533 set_mem_alias_set (modmem
, get_TOC_alias_set ());
8535 rtx modreg
= gen_reg_rtx (Pmode
);
8536 emit_insn (gen_rtx_SET (modreg
, modmem
));
8538 tmpreg
= gen_reg_rtx (Pmode
);
8539 emit_insn (gen_rtx_SET (tmpreg
, mem
));
8541 dest
= gen_reg_rtx (Pmode
);
8543 emit_insn (gen_tls_get_addrsi (dest
, modreg
, tmpreg
));
8545 emit_insn (gen_tls_get_addrdi (dest
, modreg
, tmpreg
));
8548 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8549 else if (TARGET_32BIT
)
8551 tlsreg
= gen_reg_rtx (SImode
);
8552 emit_insn (gen_tls_get_tpointer (tlsreg
));
8555 tlsreg
= gen_rtx_REG (DImode
, 13);
8557 /* Load the TOC value into temporary register. */
8558 tmpreg
= gen_reg_rtx (Pmode
);
8559 emit_insn (gen_rtx_SET (tmpreg
, mem
));
8560 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
8561 gen_rtx_MINUS (Pmode
, addr
, tlsreg
));
8563 /* Add TOC symbol value to TLS pointer. */
8564 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, tmpreg
, tlsreg
));
/* Mess with a call, to make it look like the tls_gdld insns when
   !TARGET_TLS_MARKERS.  These insns have an extra unspec to
   differentiate them from standard calls, because they need to emit
   the arg setup insns as well as the actual call.  That keeps the
   arg setup insns immediately adjacent to the branch and link.  */
8576 edit_tls_call_insn (rtx arg
)
8578 rtx call_insn
= last_call_insn ();
8579 if (!TARGET_TLS_MARKERS
)
8581 rtx patt
= PATTERN (call_insn
);
8582 gcc_assert (GET_CODE (patt
) == PARALLEL
);
8583 rtvec orig
= XVEC (patt
, 0);
8584 rtvec v
= rtvec_alloc (GET_NUM_ELEM (orig
) + 1);
8585 gcc_assert (GET_NUM_ELEM (orig
) > 0);
8586 /* The (set (..) (call (mem ..))). */
8587 RTVEC_ELT (v
, 0) = RTVEC_ELT (orig
, 0);
8588 /* The extra unspec. */
8589 RTVEC_ELT (v
, 1) = arg
;
8590 /* All other assorted call pattern pieces. */
8591 for (int i
= 1; i
< GET_NUM_ELEM (orig
); i
++)
8592 RTVEC_ELT (v
, i
+ 1) = RTVEC_ELT (orig
, i
);
8595 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
8596 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
8597 pic_offset_table_rtx
);
/* Passes the tls arg value for global dynamic and local dynamic
   emit_library_call_value in rs6000_legitimize_tls_address to
   rs6000_call_aix and rs6000_call_sysv.  This is used to emit the
   marker relocs put on __tls_get_addr calls.  */
static rtx global_tlsarg;

/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */
8610 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
8615 return rs6000_legitimize_tls_address_aix (addr
, model
);
8617 dest
= gen_reg_rtx (Pmode
);
8618 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
8624 tlsreg
= gen_rtx_REG (Pmode
, 13);
8625 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
8629 tlsreg
= gen_rtx_REG (Pmode
, 2);
8630 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
8634 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
8638 tmp
= gen_reg_rtx (Pmode
);
8641 tlsreg
= gen_rtx_REG (Pmode
, 13);
8642 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
8646 tlsreg
= gen_rtx_REG (Pmode
, 2);
8647 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
8651 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
8653 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
8658 rtx got
, tga
, tmp1
, tmp2
;
      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
8666 got
= gen_rtx_REG (Pmode
, 2);
8670 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
8673 rtx gsym
= rs6000_got_sym ();
8674 got
= gen_reg_rtx (Pmode
);
8676 rs6000_emit_move (got
, gsym
, Pmode
);
8681 tmp1
= gen_reg_rtx (Pmode
);
8682 tmp2
= gen_reg_rtx (Pmode
);
8683 mem
= gen_const_mem (Pmode
, tmp1
);
8684 lab
= gen_label_rtx ();
8685 emit_insn (gen_load_toc_v4_PIC_1b (gsym
, lab
));
8686 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
8687 if (TARGET_LINK_STACK
)
8688 emit_insn (gen_addsi3 (tmp1
, tmp1
, GEN_INT (4)));
8689 emit_move_insn (tmp2
, mem
);
8690 rtx_insn
*last
= emit_insn (gen_addsi3 (got
, tmp1
, tmp2
));
8691 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
8696 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
8698 rtx arg
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, addr
, got
),
8700 global_tlsarg
= arg
;
8701 rtx argreg
= const0_rtx
;
8702 if (TARGET_TLS_MARKERS
)
8704 argreg
= gen_rtx_REG (Pmode
, 3);
8705 emit_insn (gen_rtx_SET (argreg
, arg
));
8708 tga
= rs6000_tls_get_addr ();
8709 emit_library_call_value (tga
, dest
, LCT_CONST
, Pmode
,
8711 global_tlsarg
= NULL_RTX
;
8713 edit_tls_call_insn (arg
);
8715 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
8717 rtx arg
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, got
),
8719 global_tlsarg
= arg
;
8720 rtx argreg
= const0_rtx
;
8721 if (TARGET_TLS_MARKERS
)
8723 argreg
= gen_rtx_REG (Pmode
, 3);
8724 emit_insn (gen_rtx_SET (argreg
, arg
));
8727 tga
= rs6000_tls_get_addr ();
8728 tmp1
= gen_reg_rtx (Pmode
);
8729 emit_library_call_value (tga
, tmp1
, LCT_CONST
, Pmode
,
8731 global_tlsarg
= NULL_RTX
;
8733 edit_tls_call_insn (arg
);
8735 if (rs6000_tls_size
== 16)
8738 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
8740 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
8742 else if (rs6000_tls_size
== 32)
8744 tmp2
= gen_reg_rtx (Pmode
);
8746 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
8748 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
8751 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
8753 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
8757 tmp2
= gen_reg_rtx (Pmode
);
8759 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
8761 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
8763 insn
= gen_rtx_SET (dest
, gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
8769 /* IE, or 64-bit offset LE. */
8770 tmp2
= gen_reg_rtx (Pmode
);
8772 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
8774 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
8777 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
8779 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
/* Only create the global variable for the stack protect guard if we are using
   the global flavor of that guard.  */

rs6000_init_stack_protect_guard (void)
{
  if (rs6000_stack_protector_guard == SSP_GLOBAL)
    return default_stack_protect_guard ();

  return NULL_TREE;
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  /* A TLS symbol in the TOC cannot contain a sum.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
    return true;

  /* Do not place an ELF TLS symbol in the constant pool.  */
  return TARGET_ELF && tls_referenced_p (x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

use_toc_relative_ref (rtx sym, machine_mode mode)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && SYMBOL_REF_LOCAL_P (sym)
	      && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addiu/addis and the mem insn.
   This cuts number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
8847 rs6000_legitimize_reload_address (rtx x
, machine_mode mode
,
8848 int opnum
, int type
,
8849 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
8851 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
8852 bool quad_offset_p
= mode_supports_dq_form (mode
);
8854 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8855 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8858 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
8859 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)
8860 || (mode
== SFmode
&& recog_data
.operand_mode
[0] == V4SFmode
8861 && TARGET_P9_VECTOR
)
8862 || (mode
== SImode
&& recog_data
.operand_mode
[0] == V4SImode
8863 && TARGET_P9_VECTOR
)))
8864 reg_offset_p
= false;
8866 /* We must recognize output that we have already generated ourselves. */
8867 if (GET_CODE (x
) == PLUS
8868 && GET_CODE (XEXP (x
, 0)) == PLUS
8869 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
8870 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
8871 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
8873 if (TARGET_DEBUG_ADDR
)
8875 fprintf (stderr
, "\nlegitimize_reload_address push_reload #1:\n");
8878 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
8879 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
8880 opnum
, (enum reload_type
) type
);
8885 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8886 if (GET_CODE (x
) == LO_SUM
8887 && GET_CODE (XEXP (x
, 0)) == HIGH
)
8889 if (TARGET_DEBUG_ADDR
)
8891 fprintf (stderr
, "\nlegitimize_reload_address push_reload #2:\n");
8894 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
8895 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
8896 opnum
, (enum reload_type
) type
);
8902 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
8903 && GET_CODE (x
) == LO_SUM
8904 && GET_CODE (XEXP (x
, 0)) == PLUS
8905 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
8906 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
8907 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
8908 && machopic_operand_p (XEXP (x
, 1)))
8910 /* Result of previous invocation of this function on Darwin
8911 floating point constant. */
8912 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
8913 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
8914 opnum
, (enum reload_type
) type
);
8920 if (TARGET_CMODEL
!= CMODEL_SMALL
8923 && small_toc_ref (x
, VOIDmode
))
8925 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
8926 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
8927 if (TARGET_DEBUG_ADDR
)
8929 fprintf (stderr
, "\nlegitimize_reload_address push_reload #3:\n");
8932 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
8933 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
8934 opnum
, (enum reload_type
) type
);
8939 if (GET_CODE (x
) == PLUS
8940 && REG_P (XEXP (x
, 0))
8941 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
8942 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
8943 && CONST_INT_P (XEXP (x
, 1))
8945 && (quad_offset_p
|| !VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
)))
8947 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
8948 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
8950 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8952 /* Check for 32-bit overflow or quad addresses with one of the
8953 four least significant bits set. */
8954 if (high
+ low
!= val
8955 || (quad_offset_p
&& (low
& 0xf)))
8961 /* Reload the high part into a base reg; leave the low part
8962 in the mem directly. */
8964 x
= gen_rtx_PLUS (GET_MODE (x
),
8965 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
8969 if (TARGET_DEBUG_ADDR
)
8971 fprintf (stderr
, "\nlegitimize_reload_address push_reload #4:\n");
8974 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
8975 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
8976 opnum
, (enum reload_type
) type
);
8981 if (GET_CODE (x
) == SYMBOL_REF
8984 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
))
8986 && DEFAULT_ABI
== ABI_DARWIN
8987 && (flag_pic
|| MACHO_DYNAMIC_NO_PIC_P
)
8988 && machopic_symbol_defined_p (x
)
8990 && DEFAULT_ABI
== ABI_V4
      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
	 The same goes for DImode without 64-bit gprs and DFmode and DDmode
	 without fprs.

	 ??? Assume floating point reg based on mode?  This assumption is
	 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
	 where reload ends up doing a DFmode load of a constant from
	 mem using two gprs.  Unfortunately, at this point reload
	 hasn't yet selected regs so poking around in reload data
	 won't help and even if we could figure out the regs reliably,
	 we'd still want to allow this transformation when the mem is
	 naturally aligned.  Since we say the address is good here, we
	 can't disable offsets from LO_SUMs in mem_operand_gpr.
	 FIXME: Allow offset from lo_sum for other modes too, when
	 mem is sufficiently aligned.

	 Also disallow this if the type can go in VMX/Altivec registers, since
	 those registers do not have d-form (reg+offset) address modes.  */
9010 && !reg_addr
[mode
].scalar_in_vmx_p
9015 && (mode
!= TImode
|| !TARGET_VSX
)
9017 && (mode
!= DImode
|| TARGET_POWERPC64
)
9018 && ((mode
!= DFmode
&& mode
!= DDmode
) || TARGET_POWERPC64
9019 || TARGET_HARD_FLOAT
))
9024 rtx offset
= machopic_gen_offset (x
);
9025 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9026 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
9027 gen_rtx_HIGH (Pmode
, offset
)), offset
);
9031 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9032 gen_rtx_HIGH (Pmode
, x
), x
);
9034 if (TARGET_DEBUG_ADDR
)
9036 fprintf (stderr
, "\nlegitimize_reload_address push_reload #5:\n");
9039 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9040 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9041 opnum
, (enum reload_type
) type
);
  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used or not.  */
9052 if (VECTOR_MEM_ALTIVEC_P (mode
)
9053 && GET_CODE (x
) == AND
9054 && GET_CODE (XEXP (x
, 0)) == PLUS
9055 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9056 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9057 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9058 && INTVAL (XEXP (x
, 1)) == -16)
9068 && GET_CODE (x
) == SYMBOL_REF
9069 && use_toc_relative_ref (x
, mode
))
9071 x
= create_TOC_reference (x
, NULL_RTX
);
9072 if (TARGET_CMODEL
!= CMODEL_SMALL
)
9074 if (TARGET_DEBUG_ADDR
)
9076 fprintf (stderr
, "\nlegitimize_reload_address push_reload #6:\n");
9079 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9080 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9081 opnum
, (enum reload_type
) type
);
9090 /* Debug version of rs6000_legitimize_reload_address. */
9092 rs6000_debug_legitimize_reload_address (rtx x
, machine_mode mode
,
9093 int opnum
, int type
,
9094 int ind_levels
, int *win
)
9096 rtx ret
= rs6000_legitimize_reload_address (x
, mode
, opnum
, type
,
9099 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9100 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9101 GET_MODE_NAME (mode
), opnum
, type
, ind_levels
, *win
);
9105 fprintf (stderr
, "Same address returned\n");
9107 fprintf (stderr
, "NULL returned\n");
9110 fprintf (stderr
, "New address:\n");
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
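/* Sketch (illustrative) of the address shapes the checks below accept,
   subject to the mode and target tests in the code:

     (reg)                                register indirect
     (plus (reg) (const_int d))           16-bit signed displacement
     (plus (reg) (reg))                   indexed (x-form)
     (lo_sum (reg) (symbol_ref/const))    medium/large code models or Mach-O
     (pre_inc (reg)) / (pre_dec (reg))    update forms
     (pre_modify (reg) (plus ...))        update with displacement or index  */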
9135 rs6000_legitimate_address_p (machine_mode mode
, rtx x
, bool reg_ok_strict
)
9137 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9138 bool quad_offset_p
= mode_supports_dq_form (mode
);
9140 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9141 if (VECTOR_MEM_ALTIVEC_P (mode
)
9142 && GET_CODE (x
) == AND
9143 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9144 && INTVAL (XEXP (x
, 1)) == -16)
9147 if (TARGET_ELF
&& RS6000_SYMBOL_REF_TLS_P (x
))
9149 if (legitimate_indirect_address_p (x
, reg_ok_strict
))
9152 && (GET_CODE (x
) == PRE_INC
|| GET_CODE (x
) == PRE_DEC
)
9153 && mode_supports_pre_incdec_p (mode
)
9154 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
))
9156 /* Handle restricted vector d-form offsets in ISA 3.0. */
9159 if (quad_address_p (x
, mode
, reg_ok_strict
))
9162 else if (virtual_stack_registers_memory_p (x
))
9165 else if (reg_offset_p
)
9167 if (legitimate_small_data_p (mode
, x
))
9169 if (legitimate_constant_pool_address_p (x
, mode
,
9170 reg_ok_strict
|| lra_in_progress
))
9174 /* For TImode, if we have TImode in VSX registers, only allow register
9175 indirect addresses. This will allow the values to go in either GPRs
9176 or VSX registers without reloading. The vector types would tend to
9177 go into VSX registers, so we allow REG+REG, while TImode seems
9178 somewhat split, in that some uses are GPR based, and some VSX based. */
9179 /* FIXME: We could loosen this by changing the following to
9180 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9181 but currently we cannot allow REG+REG addressing for TImode. See
9182 PR72827 for complete details on how this ends up hoodwinking DSE. */
9183 if (mode
== TImode
&& TARGET_VSX
)
9185 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9188 && GET_CODE (x
) == PLUS
9189 && GET_CODE (XEXP (x
, 0)) == REG
9190 && (XEXP (x
, 0) == virtual_stack_vars_rtx
9191 || XEXP (x
, 0) == arg_pointer_rtx
)
9192 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9194 if (rs6000_legitimate_offset_address_p (mode
, x
, reg_ok_strict
, false))
9196 if (!FLOAT128_2REG_P (mode
)
9197 && (TARGET_HARD_FLOAT
9199 || (mode
!= DFmode
&& mode
!= DDmode
))
9200 && (TARGET_POWERPC64
|| mode
!= DImode
)
9201 && (mode
!= TImode
|| VECTOR_MEM_VSX_P (TImode
))
9203 && !avoiding_indexed_address_p (mode
)
9204 && legitimate_indexed_address_p (x
, reg_ok_strict
))
9206 if (TARGET_UPDATE
&& GET_CODE (x
) == PRE_MODIFY
9207 && mode_supports_pre_modify_p (mode
)
9208 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
)
9209 && (rs6000_legitimate_offset_address_p (mode
, XEXP (x
, 1),
9210 reg_ok_strict
, false)
9211 || (!avoiding_indexed_address_p (mode
)
9212 && legitimate_indexed_address_p (XEXP (x
, 1), reg_ok_strict
)))
9213 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
9215 if (reg_offset_p
&& !quad_offset_p
9216 && legitimate_lo_sum_address_p (mode
, x
, reg_ok_strict
))
9221 /* Debug version of rs6000_legitimate_address_p. */
9223 rs6000_debug_legitimate_address_p (machine_mode mode
, rtx x
,
9226 bool ret
= rs6000_legitimate_address_p (mode
, x
, reg_ok_strict
);
9228 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9229 "strict = %d, reload = %s, code = %s\n",
9230 ret
? "true" : "false",
9231 GET_MODE_NAME (mode
),
9233 (reload_completed
? "after" : "before"),
9234 GET_RTX_NAME (GET_CODE (x
)));
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

rs6000_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return rs6000_mode_dependent_address_ptr (addr);
}

/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */
9261 rs6000_mode_dependent_address (const_rtx addr
)
9263 switch (GET_CODE (addr
))
9266 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9267 is considered a legitimate address before reload, so there
9268 are no offset restrictions in that case. Note that this
9269 condition is safe in strict mode because any address involving
9270 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9271 been rejected as illegitimate. */
9272 if (XEXP (addr
, 0) != virtual_stack_vars_rtx
9273 && XEXP (addr
, 0) != arg_pointer_rtx
9274 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
9276 unsigned HOST_WIDE_INT val
= INTVAL (XEXP (addr
, 1));
9277 return val
+ 0x8000 >= 0x10000 - (TARGET_POWERPC64
? 8 : 12);
9282 /* Anything in the constant pool is sufficiently aligned that
9283 all bytes have the same high part address. */
9284 return !legitimate_constant_pool_address_p (addr
, QImode
, false);
9286 /* Auto-increment cases are now treated generically in recog.c. */
9288 return TARGET_UPDATE
;
9290 /* AND is only allowed in Altivec loads. */
9301 /* Debug version of rs6000_mode_dependent_address. */
9303 rs6000_debug_mode_dependent_address (const_rtx addr
)
9305 bool ret
= rs6000_mode_dependent_address (addr
);
9307 fprintf (stderr
, "\nrs6000_mode_dependent_address: ret = %s\n",
9308 ret
? "true" : "false");
/* Implement FIND_BASE_TERM.  */

rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
{
  bool worst_case;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
		|| GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
					     strict, worst_case);
}
/* Determine the reassociation width to be used in reassociate_bb.
   This takes into account how many parallel operations we
   can actually do of a given type, and also the latency.

   vect add/sub/mul 2/cycle
   fp   add/sub/mul 2/cycle  */

rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
			    machine_mode mode)
{
  switch (rs6000_tune)
    {
    case PROCESSOR_POWER8:
    case PROCESSOR_POWER9:
      if (DECIMAL_FLOAT_MODE_P (mode))
      if (VECTOR_MODE_P (mode))
      if (INTEGRAL_MODE_P (mode))
      if (FLOAT_MODE_P (mode))
9407 /* Change register usage conditional on target flags. */
9409 rs6000_conditional_register_usage (void)
9413 if (TARGET_DEBUG_TARGET
)
9414 fprintf (stderr
, "rs6000_conditional_register_usage called\n");
9416 /* Set MQ register fixed (already call_used) so that it will not be
9420 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9422 fixed_regs
[13] = call_used_regs
[13]
9423 = call_really_used_regs
[13] = 1;
9425 /* Conditionally disable FPRs. */
9426 if (TARGET_SOFT_FLOAT
)
9427 for (i
= 32; i
< 64; i
++)
9428 fixed_regs
[i
] = call_used_regs
[i
]
9429 = call_really_used_regs
[i
] = 1;
9431 /* The TOC register is not killed across calls in a way that is
9432 visible to the compiler. */
9433 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9434 call_really_used_regs
[2] = 0;
9436 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
9437 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
9439 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
9440 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9441 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9442 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
9444 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
9445 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9446 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9447 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
9449 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
)
9450 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9451 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
9453 if (!TARGET_ALTIVEC
&& !TARGET_VSX
)
9455 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
9456 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
9457 call_really_used_regs
[VRSAVE_REGNO
] = 1;
9460 if (TARGET_ALTIVEC
|| TARGET_VSX
)
9461 global_regs
[VSCR_REGNO
] = 1;
9463 if (TARGET_ALTIVEC_ABI
)
9465 for (i
= FIRST_ALTIVEC_REGNO
; i
< FIRST_ALTIVEC_REGNO
+ 20; ++i
)
9466 call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
9468 /* AIX reserves VR20:31 in non-extended ABI mode. */
9470 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
< FIRST_ALTIVEC_REGNO
+ 32; ++i
)
9471 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
/* Output insns to set DEST equal to the constant SOURCE as a series of
   lis, ori and shl instructions and return TRUE.  */

rs6000_emit_set_const (rtx dest, rtx source)
{
  machine_mode mode = GET_MODE (dest);
  rtx temp, set;
  rtx_insn *insn;
  HOST_WIDE_INT c;

  gcc_checking_assert (CONST_INT_P (source));
  c = INTVAL (source);
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
      emit_insn (gen_rtx_SET (dest, source));
      return true;

    case E_SImode:
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (copy_rtx (temp),
			      GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
      emit_insn (gen_rtx_SET (dest,
			      gen_rtx_IOR (SImode, copy_rtx (temp),
					   GEN_INT (c & 0xffff))));
      break;

    case E_DImode:
      if (!TARGET_POWERPC64)
	{
	  rtx hi, lo;

	  hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
				      DImode);
	  lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
				      DImode);
	  emit_move_insn (hi, GEN_INT (c >> 32));
	  c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
	  emit_move_insn (lo, GEN_INT (c));
	}
      else
	rs6000_emit_set_long_const (dest, c);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));

  return true;
}
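/* Worked example (illustrative): for SImode c = 0x12345678 the code above
   emits two insns,
     lis  rT, 0x1234        ; rT = c & ~0xffff = 0x12340000
     ori  rD, rT, 0x5678    ; rD = rT | (c & 0xffff)
   which is the lis/ori pair described in the comment before
   rs6000_emit_set_const.  */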
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */

rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  HOST_WIDE_INT ud1, ud2, ud3, ud4;
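  /* Illustrative note (assuming the usual halfword split): ud1..ud4 hold the
     four 16-bit halfwords of C from least to most significant, e.g. for
     c = 0x123456789abcdef0
       ud1 = 0xdef0, ud2 = 0x9abc, ud3 = 0x5678, ud4 = 0x1234,
     and the cases below pick the shortest lis/ori/shl sequence based on which
     halfwords are zero or sign-extensions of the halfword below them.  */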
9553 if ((ud4
== 0xffff && ud3
== 0xffff && ud2
== 0xffff && (ud1
& 0x8000))
9554 || (ud4
== 0 && ud3
== 0 && ud2
== 0 && ! (ud1
& 0x8000)))
9555 emit_move_insn (dest
, GEN_INT ((ud1
^ 0x8000) - 0x8000));
9557 else if ((ud4
== 0xffff && ud3
== 0xffff && (ud2
& 0x8000))
9558 || (ud4
== 0 && ud3
== 0 && ! (ud2
& 0x8000)))
9560 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
9562 emit_move_insn (ud1
!= 0 ? copy_rtx (temp
) : dest
,
9563 GEN_INT (((ud2
<< 16) ^ 0x80000000) - 0x80000000));
9565 emit_move_insn (dest
,
9566 gen_rtx_IOR (DImode
, copy_rtx (temp
),
9569 else if (ud3
== 0 && ud4
== 0)
9571 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
9573 gcc_assert (ud2
& 0x8000);
9574 emit_move_insn (copy_rtx (temp
),
9575 GEN_INT (((ud2
<< 16) ^ 0x80000000) - 0x80000000));
9577 emit_move_insn (copy_rtx (temp
),
9578 gen_rtx_IOR (DImode
, copy_rtx (temp
),
9580 emit_move_insn (dest
,
9581 gen_rtx_ZERO_EXTEND (DImode
,
9582 gen_lowpart (SImode
,
9585 else if ((ud4
== 0xffff && (ud3
& 0x8000))
9586 || (ud4
== 0 && ! (ud3
& 0x8000)))
9588 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
9590 emit_move_insn (copy_rtx (temp
),
9591 GEN_INT (((ud3
<< 16) ^ 0x80000000) - 0x80000000));
9593 emit_move_insn (copy_rtx (temp
),
9594 gen_rtx_IOR (DImode
, copy_rtx (temp
),
9596 emit_move_insn (ud1
!= 0 ? copy_rtx (temp
) : dest
,
9597 gen_rtx_ASHIFT (DImode
, copy_rtx (temp
),
9600 emit_move_insn (dest
,
9601 gen_rtx_IOR (DImode
, copy_rtx (temp
),
9606 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (DImode
);
9608 emit_move_insn (copy_rtx (temp
),
9609 GEN_INT (((ud4
<< 16) ^ 0x80000000) - 0x80000000));
9611 emit_move_insn (copy_rtx (temp
),
9612 gen_rtx_IOR (DImode
, copy_rtx (temp
),
9615 emit_move_insn (ud2
!= 0 || ud1
!= 0 ? copy_rtx (temp
) : dest
,
9616 gen_rtx_ASHIFT (DImode
, copy_rtx (temp
),
9619 emit_move_insn (ud1
!= 0 ? copy_rtx (temp
) : dest
,
9620 gen_rtx_IOR (DImode
, copy_rtx (temp
),
9621 GEN_INT (ud2
<< 16)));
9623 emit_move_insn (dest
,
9624 gen_rtx_IOR (DImode
, copy_rtx (temp
),
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */

rs6000_const_vec (machine_mode mode)
{
  int i, subparts;
  rtvec v;

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
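/* Illustrative example: for a mode with subparts == 4 the loops above build
   the selector { 2, 3, 0, 1 }, i.e. the two halves of the vector swap
   places, which is exactly the element-order fixup the little-endian
   lxvd2x/stxvd2x sequences below rely on.  */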
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store.  */

rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     element.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
						  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insns that represent an lxvd2x and xxpermdi.  */

rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insns that represent an xxpermdi and an stxvd2x.  */

rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */

rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
	      && VECTOR_MEM_VSX_P (mode)
	      && !TARGET_P9_VECTOR
	      && !gpr_or_gpr_p (dest, source)
	      && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
	source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

	(SUBREG:SF (REG:SI ...))
	(SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

	(SUBREG:SF (REG:SI)) and
	(SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to convert
   the bits before we can use a direct move or operate on the bits in the
   vector register as an integer type.

   Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))).  */
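/* Illustrative sketch: with direct moves available, a move such as

     (set (reg:SI r) (subreg:SI (reg:SF f) 0))

   is rewritten by the helper below into gen_movsi_from_sf (r, f), the
   UNSPEC pattern that converts the DFmode storage format to the SImode
   bit pattern before the value leaves the vector register.  */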
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
	{
	  emit_insn (gen_movsi_from_sf (dest, inner_source));
	  return true;
	}

      if (mode == SFmode && inner_mode == SImode)
	{
	  emit_insn (gen_movsf_from_si (dest, inner_source));
	  return true;
	}
    }

  return false;
}
9864 /* Emit a move from SOURCE to DEST in mode MODE. */
9866 rs6000_emit_move (rtx dest
, rtx source
, machine_mode mode
)
9870 operands
[1] = source
;
9872 if (TARGET_DEBUG_ADDR
)
9875 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9876 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9877 GET_MODE_NAME (mode
),
9880 can_create_pseudo_p ());
9882 fprintf (stderr
, "source:\n");
9886 /* Check that we get CONST_WIDE_INT only when we should. */
9887 if (CONST_WIDE_INT_P (operands
[1])
9888 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
9891 #ifdef HAVE_AS_GNU_ATTRIBUTE
  /* If we use a long double type, set the flags in .gnu_attribute that say
     what the long double type is.  This is to allow the linker's warning
     message for the wrong long double to be useful, even if the function does
     not do a call (for example, doing a 128-bit add on power9 if the long
     double type is IEEE 128-bit).  Do not set this if __ibm128 or __float128
     are used when they aren't the default long double type.  */
9898 if (rs6000_gnu_attr
&& (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
))
9900 if (TARGET_LONG_DOUBLE_128
&& (mode
== TFmode
|| mode
== TCmode
))
9901 rs6000_passes_float
= rs6000_passes_long_double
= true;
9903 else if (!TARGET_LONG_DOUBLE_128
&& (mode
== DFmode
|| mode
== DCmode
))
9904 rs6000_passes_float
= rs6000_passes_long_double
= true;
9908 /* See if we need to special case SImode/SFmode SUBREG moves. */
9909 if ((mode
== SImode
|| mode
== SFmode
) && SUBREG_P (source
)
9910 && rs6000_emit_move_si_sf_subreg (dest
, source
, mode
))
9913 /* Check if GCC is setting up a block move that will end up using FP
9914 registers as temporaries. We must make sure this is acceptable. */
9915 if (GET_CODE (operands
[0]) == MEM
9916 && GET_CODE (operands
[1]) == MEM
9918 && (rs6000_slow_unaligned_access (DImode
, MEM_ALIGN (operands
[0]))
9919 || rs6000_slow_unaligned_access (DImode
, MEM_ALIGN (operands
[1])))
9920 && ! (rs6000_slow_unaligned_access (SImode
,
9921 (MEM_ALIGN (operands
[0]) > 32
9922 ? 32 : MEM_ALIGN (operands
[0])))
9923 || rs6000_slow_unaligned_access (SImode
,
9924 (MEM_ALIGN (operands
[1]) > 32
9925 ? 32 : MEM_ALIGN (operands
[1]))))
9926 && ! MEM_VOLATILE_P (operands
[0])
9927 && ! MEM_VOLATILE_P (operands
[1]))
9929 emit_move_insn (adjust_address (operands
[0], SImode
, 0),
9930 adjust_address (operands
[1], SImode
, 0));
9931 emit_move_insn (adjust_address (copy_rtx (operands
[0]), SImode
, 4),
9932 adjust_address (copy_rtx (operands
[1]), SImode
, 4));
9936 if (can_create_pseudo_p () && GET_CODE (operands
[0]) == MEM
9937 && !gpc_reg_operand (operands
[1], mode
))
9938 operands
[1] = force_reg (mode
, operands
[1]);
9940 /* Recognize the case where operand[1] is a reference to thread-local
9941 data and load its address to a register. */
9942 if (tls_referenced_p (operands
[1]))
9944 enum tls_model model
;
9945 rtx tmp
= operands
[1];
9948 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
9950 addend
= XEXP (XEXP (tmp
, 0), 1);
9951 tmp
= XEXP (XEXP (tmp
, 0), 0);
9954 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
9955 model
= SYMBOL_REF_TLS_MODEL (tmp
);
9956 gcc_assert (model
!= 0);
9958 tmp
= rs6000_legitimize_tls_address (tmp
, model
);
9961 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
9962 tmp
= force_operand (tmp
, operands
[0]);
9967 /* 128-bit constant floating-point values on Darwin should really be loaded
9968 as two parts. However, this premature splitting is a problem when DFmode
9969 values can go into Altivec registers. */
9970 if (FLOAT128_IBM_P (mode
) && !reg_addr
[DFmode
].scalar_in_vmx_p
9971 && GET_CODE (operands
[1]) == CONST_DOUBLE
)
9973 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
, 0),
9974 simplify_gen_subreg (DFmode
, operands
[1], mode
, 0),
9976 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
,
9977 GET_MODE_SIZE (DFmode
)),
9978 simplify_gen_subreg (DFmode
, operands
[1], mode
,
9979 GET_MODE_SIZE (DFmode
)),
9984 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9985 p1:SD) if p1 is not of floating point class and p0 is spilled as
9986 we can have no analogous movsd_store for this. */
9987 if (lra_in_progress
&& mode
== DDmode
9988 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
9989 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
9990 && GET_CODE (operands
[1]) == SUBREG
&& REG_P (SUBREG_REG (operands
[1]))
9991 && GET_MODE (SUBREG_REG (operands
[1])) == SDmode
)
9994 int regno
= REGNO (SUBREG_REG (operands
[1]));
9996 if (regno
>= FIRST_PSEUDO_REGISTER
)
9998 cl
= reg_preferred_class (regno
);
9999 regno
= reg_renumber
[regno
];
10001 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][1];
10003 if (regno
>= 0 && ! FP_REGNO_P (regno
))
10006 operands
[0] = gen_lowpart_SUBREG (SDmode
, operands
[0]);
10007 operands
[1] = SUBREG_REG (operands
[1]);
10010 if (lra_in_progress
10012 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
10013 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
10014 && (REG_P (operands
[1])
10015 || (GET_CODE (operands
[1]) == SUBREG
10016 && REG_P (SUBREG_REG (operands
[1])))))
10018 int regno
= REGNO (GET_CODE (operands
[1]) == SUBREG
10019 ? SUBREG_REG (operands
[1]) : operands
[1]);
10022 if (regno
>= FIRST_PSEUDO_REGISTER
)
10024 cl
= reg_preferred_class (regno
);
10025 gcc_assert (cl
!= NO_REGS
);
10026 regno
= reg_renumber
[regno
];
10028 regno
= ira_class_hard_regs
[cl
][0];
10030 if (FP_REGNO_P (regno
))
10032 if (GET_MODE (operands
[0]) != DDmode
)
10033 operands
[0] = gen_rtx_SUBREG (DDmode
, operands
[0], 0);
10034 emit_insn (gen_movsd_store (operands
[0], operands
[1]));
10036 else if (INT_REGNO_P (regno
))
10037 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
10042 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10043 p:DD)) if p0 is not of floating point class and p1 is spilled as
10044 we can have no analogous movsd_load for this. */
10045 if (lra_in_progress
&& mode
== DDmode
10046 && GET_CODE (operands
[0]) == SUBREG
&& REG_P (SUBREG_REG (operands
[0]))
10047 && GET_MODE (SUBREG_REG (operands
[0])) == SDmode
10048 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
10049 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
10052 int regno
= REGNO (SUBREG_REG (operands
[0]));
10054 if (regno
>= FIRST_PSEUDO_REGISTER
)
10056 cl
= reg_preferred_class (regno
);
10057 regno
= reg_renumber
[regno
];
10059 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][0];
10061 if (regno
>= 0 && ! FP_REGNO_P (regno
))
10064 operands
[0] = SUBREG_REG (operands
[0]);
10065 operands
[1] = gen_lowpart_SUBREG (SDmode
, operands
[1]);
10068 if (lra_in_progress
10070 && (REG_P (operands
[0])
10071 || (GET_CODE (operands
[0]) == SUBREG
10072 && REG_P (SUBREG_REG (operands
[0]))))
10073 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
10074 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
10076 int regno
= REGNO (GET_CODE (operands
[0]) == SUBREG
10077 ? SUBREG_REG (operands
[0]) : operands
[0]);
10080 if (regno
>= FIRST_PSEUDO_REGISTER
)
10082 cl
= reg_preferred_class (regno
);
10083 gcc_assert (cl
!= NO_REGS
);
10084 regno
= reg_renumber
[regno
];
10086 regno
= ira_class_hard_regs
[cl
][0];
10088 if (FP_REGNO_P (regno
))
10090 if (GET_MODE (operands
[1]) != DDmode
)
10091 operands
[1] = gen_rtx_SUBREG (DDmode
, operands
[1], 0);
10092 emit_insn (gen_movsd_load (operands
[0], operands
[1]));
10094 else if (INT_REGNO_P (regno
))
10095 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
10101 /* FIXME: In the long term, this switch statement should go away
10102 and be replaced by a sequence of tests based on things like
10108 if (CONSTANT_P (operands
[1])
10109 && GET_CODE (operands
[1]) != CONST_INT
)
10110 operands
[1] = force_const_mem (mode
, operands
[1]);
10117 if (FLOAT128_2REG_P (mode
))
10118 rs6000_eliminate_indexed_memrefs (operands
);
10125 if (CONSTANT_P (operands
[1])
10126 && ! easy_fp_constant (operands
[1], mode
))
10127 operands
[1] = force_const_mem (mode
, operands
[1]);
10137 if (CONSTANT_P (operands
[1])
10138 && !easy_vector_constant (operands
[1], mode
))
10139 operands
[1] = force_const_mem (mode
, operands
[1]);
10144 /* Use default pattern for address of ELF small data */
10147 && DEFAULT_ABI
== ABI_V4
10148 && (GET_CODE (operands
[1]) == SYMBOL_REF
10149 || GET_CODE (operands
[1]) == CONST
)
10150 && small_data_operand (operands
[1], mode
))
10152 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10156 if (DEFAULT_ABI
== ABI_V4
10157 && mode
== Pmode
&& mode
== SImode
10158 && flag_pic
== 1 && got_operand (operands
[1], mode
))
10160 emit_insn (gen_movsi_got (operands
[0], operands
[1]));
10164 if ((TARGET_ELF
|| DEFAULT_ABI
== ABI_DARWIN
)
10168 && CONSTANT_P (operands
[1])
10169 && GET_CODE (operands
[1]) != HIGH
10170 && GET_CODE (operands
[1]) != CONST_INT
)
10172 rtx target
= (!can_create_pseudo_p ()
10174 : gen_reg_rtx (mode
));
10176 /* If this is a function address on -mcall-aixdesc,
10177 convert it to the address of the descriptor. */
10178 if (DEFAULT_ABI
== ABI_AIX
10179 && GET_CODE (operands
[1]) == SYMBOL_REF
10180 && XSTR (operands
[1], 0)[0] == '.')
10182 const char *name
= XSTR (operands
[1], 0);
10184 while (*name
== '.')
10186 new_ref
= gen_rtx_SYMBOL_REF (Pmode
, name
);
10187 CONSTANT_POOL_ADDRESS_P (new_ref
)
10188 = CONSTANT_POOL_ADDRESS_P (operands
[1]);
10189 SYMBOL_REF_FLAGS (new_ref
) = SYMBOL_REF_FLAGS (operands
[1]);
10190 SYMBOL_REF_USED (new_ref
) = SYMBOL_REF_USED (operands
[1]);
10191 SYMBOL_REF_DATA (new_ref
) = SYMBOL_REF_DATA (operands
[1]);
10192 operands
[1] = new_ref
;
10195 if (DEFAULT_ABI
== ABI_DARWIN
)
10198 if (MACHO_DYNAMIC_NO_PIC_P
)
10200 /* Take care of any required data indirection. */
10201 operands
[1] = rs6000_machopic_legitimize_pic_address (
10202 operands
[1], mode
, operands
[0]);
10203 if (operands
[0] != operands
[1])
10204 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10208 emit_insn (gen_macho_high (target
, operands
[1]));
10209 emit_insn (gen_macho_low (operands
[0], target
, operands
[1]));
10213 emit_insn (gen_elf_high (target
, operands
[1]));
10214 emit_insn (gen_elf_low (operands
[0], target
, operands
[1]));
10218 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10219 and we have put it in the TOC, we just need to make a TOC-relative
10220 reference to it. */
10222 && GET_CODE (operands
[1]) == SYMBOL_REF
10223 && use_toc_relative_ref (operands
[1], mode
))
10224 operands
[1] = create_TOC_reference (operands
[1], operands
[0]);
10225 else if (mode
== Pmode
10226 && CONSTANT_P (operands
[1])
10227 && GET_CODE (operands
[1]) != HIGH
10228 && ((REG_P (operands
[0])
10229 && FP_REGNO_P (REGNO (operands
[0])))
10230 || !CONST_INT_P (operands
[1])
10231 || (num_insns_constant (operands
[1], mode
)
10232 > (TARGET_CMODEL
!= CMODEL_SMALL
? 3 : 2)))
10233 && !toc_relative_expr_p (operands
[1], false, NULL
, NULL
)
10234 && (TARGET_CMODEL
== CMODEL_SMALL
10235 || can_create_pseudo_p ()
10236 || (REG_P (operands
[0])
10237 && INT_REG_OK_FOR_BASE_P (operands
[0], true))))
10241 /* Darwin uses a special PIC legitimizer. */
10242 if (DEFAULT_ABI
== ABI_DARWIN
&& MACHOPIC_INDIRECT
)
10245 rs6000_machopic_legitimize_pic_address (operands
[1], mode
,
10247 if (operands
[0] != operands
[1])
10248 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10253 /* If we are to limit the number of things we put in the TOC and
10254 this is a symbol plus a constant we can add in one insn,
10255 just put the symbol in the TOC and add the constant. */
10256 if (GET_CODE (operands
[1]) == CONST
10257 && TARGET_NO_SUM_IN_TOC
10258 && GET_CODE (XEXP (operands
[1], 0)) == PLUS
10259 && add_operand (XEXP (XEXP (operands
[1], 0), 1), mode
)
10260 && (GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == LABEL_REF
10261 || GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == SYMBOL_REF
)
10262 && ! side_effects_p (operands
[0]))
10265 force_const_mem (mode
, XEXP (XEXP (operands
[1], 0), 0));
10266 rtx other
= XEXP (XEXP (operands
[1], 0), 1);
10268 sym
= force_reg (mode
, sym
);
10269 emit_insn (gen_add3_insn (operands
[0], sym
, other
));
10273 operands
[1] = force_const_mem (mode
, operands
[1]);
10276 && GET_CODE (XEXP (operands
[1], 0)) == SYMBOL_REF
10277 && use_toc_relative_ref (XEXP (operands
[1], 0), mode
))
10279 rtx tocref
= create_TOC_reference (XEXP (operands
[1], 0),
10281 operands
[1] = gen_const_mem (mode
, tocref
);
10282 set_mem_alias_set (operands
[1], get_TOC_alias_set ());
10288 if (!VECTOR_MEM_VSX_P (TImode
))
10289 rs6000_eliminate_indexed_memrefs (operands
);
10293 rs6000_eliminate_indexed_memrefs (operands
);
10297 fatal_insn ("bad move", gen_rtx_SET (dest
, source
));
10300 /* Above, we may have called force_const_mem which may have returned
10301 an invalid address. If we can, fix this up; otherwise, reload will
10302 have to deal with it. */
10303 if (GET_CODE (operands
[1]) == MEM
)
10304 operands
[1] = validize_mem (operands
[1]);
10306 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10309 /* Nonzero if we can use a floating-point register to pass this arg. */
10310 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10311 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10312 && (CUM)->fregno <= FP_ARG_MAX_REG \
10313 && TARGET_HARD_FLOAT)
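/* Illustrative sketch (an assumption): for a prototyped call such as

       double f (double a, double b);

   each double argument satisfies USE_FP_FOR_ARG_P while cum->fregno is
   still within the FP argument registers; once fregno walks past
   FP_ARG_MAX_REG the remaining FP arguments no longer qualify.  */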
10315 /* Nonzero if we can use an AltiVec register to pass this arg. */
10316 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10317 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10318 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10319 && TARGET_ALTIVEC_ABI \
10322 /* Walk down the type tree of TYPE counting consecutive base elements.
10323 If *MODEP is VOIDmode, then set it to the first valid floating point
10324 or vector type. If a non-floating point or vector type is found, or
10325 if a floating point or vector type that doesn't match a non-VOIDmode
10326 *MODEP is found, then return -1, otherwise return the count in the
10330 rs6000_aggregate_candidate (const_tree type
, machine_mode
*modep
)
10333 HOST_WIDE_INT size
;
10335 switch (TREE_CODE (type
))
10338 mode
= TYPE_MODE (type
);
10339 if (!SCALAR_FLOAT_MODE_P (mode
))
10342 if (*modep
== VOIDmode
)
10345 if (*modep
== mode
)
10351 mode
= TYPE_MODE (TREE_TYPE (type
));
10352 if (!SCALAR_FLOAT_MODE_P (mode
))
10355 if (*modep
== VOIDmode
)
10358 if (*modep
== mode
)
10364 if (!TARGET_ALTIVEC_ABI
|| !TARGET_ALTIVEC
)
10367 /* Use V4SImode as representative of all 128-bit vector types. */
10368 size
= int_size_in_bytes (type
);
10378 if (*modep
== VOIDmode
)
10381 /* Vector modes are considered to be opaque: two vectors are
10382 equivalent for the purposes of being homogeneous aggregates
10383 if they are the same size. */
10384 if (*modep
== mode
)
10392 tree index
= TYPE_DOMAIN (type
);
10394 /* Can't handle incomplete types nor sizes that are not
10396 if (!COMPLETE_TYPE_P (type
)
10397 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10400 count
= rs6000_aggregate_candidate (TREE_TYPE (type
), modep
);
10403 || !TYPE_MAX_VALUE (index
)
10404 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
10405 || !TYPE_MIN_VALUE (index
)
10406 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
10410 count
*= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
10411 - tree_to_uhwi (TYPE_MIN_VALUE (index
)));
10413 /* There must be no padding. */
10414 if (wi::to_wide (TYPE_SIZE (type
))
10415 != count
* GET_MODE_BITSIZE (*modep
))
10427 /* Can't handle incomplete types nor sizes that are not
10429 if (!COMPLETE_TYPE_P (type
)
10430 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10433 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
10435 if (TREE_CODE (field
) != FIELD_DECL
)
10438 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
10441 count
+= sub_count
;
10444 /* There must be no padding. */
10445 if (wi::to_wide (TYPE_SIZE (type
))
10446 != count
* GET_MODE_BITSIZE (*modep
))
10453 case QUAL_UNION_TYPE
:
10455 /* These aren't very interesting except in a degenerate case. */
10460 /* Can't handle incomplete types nor sizes that are not
10462 if (!COMPLETE_TYPE_P (type
)
10463 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10466 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
10468 if (TREE_CODE (field
) != FIELD_DECL
)
10471 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
10474 count
= count
> sub_count
? count
: sub_count
;
10477 /* There must be no padding. */
10478 if (wi::to_wide (TYPE_SIZE (type
))
10479 != count
* GET_MODE_BITSIZE (*modep
))
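/* Illustrative example (an assumption, not exercised here): for

       struct hfa { double x; double y; double z; };

   the walk above visits three FIELD_DECLs, sets *modep to DFmode on the
   first one, matches the other two against it, and yields a count of 3;
   mixing a double with an int field would instead produce -1.  */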
10492 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10493 float or vector aggregate that shall be passed in FP/vector registers
10494 according to the ELFv2 ABI, return the homogeneous element mode in
10495 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10497 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10500 rs6000_discover_homogeneous_aggregate (machine_mode mode
, const_tree type
,
10501 machine_mode
*elt_mode
,
10504 /* Note that we do not accept complex types at the top level as
10505 homogeneous aggregates; these types are handled via the
10506 targetm.calls.split_complex_arg mechanism. Complex types
10507 can be elements of homogeneous aggregates, however. */
10508 if (TARGET_HARD_FLOAT
&& DEFAULT_ABI
== ABI_ELFv2
&& type
10509 && AGGREGATE_TYPE_P (type
))
10511 machine_mode field_mode
= VOIDmode
;
10512 int field_count
= rs6000_aggregate_candidate (type
, &field_mode
);
10514 if (field_count
> 0)
10516 int reg_size
= ALTIVEC_OR_VSX_VECTOR_MODE (field_mode
) ? 16 : 8;
10517 int field_size
= ROUND_UP (GET_MODE_SIZE (field_mode
), reg_size
);
10519 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10520 up to AGGR_ARG_NUM_REG registers. */
10521 if (field_count
* field_size
<= AGGR_ARG_NUM_REG
* reg_size
)
10524 *elt_mode
= field_mode
;
10526 *n_elts
= field_count
;
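/* Illustrative example (an assumption): under ELFv2

       struct hva { vector double a; vector double b; };

   is discovered with *elt_mode = V2DFmode and *n_elts = 2, since two
   16-byte elements fit within the AGGR_ARG_NUM_REG vector registers,
   while a struct of nine doubles would exceed the limit and fail.  */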
10539 /* Return a nonzero value to say to return the function value in
10540 memory, just as large structures are always returned. TYPE will be
10541 the data type of the value, and FNTYPE will be the type of the
10542 function doing the returning, or @code{NULL} for libcalls.
10544 The AIX ABI for the RS/6000 specifies that all structures are
10545 returned in memory. The Darwin ABI does the same.
10547 For the Darwin 64 Bit ABI, a function result can be returned in
10548 registers or in memory, depending on the size of the return data
10549 type. If it is returned in registers, the value occupies the same
10550 registers as it would if it were the first and only function
10551 argument. Otherwise, the function places its result in memory at
10552 the location pointed to by GPR3.
10554 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10555 but a draft put them in memory, and GCC used to implement the draft
10556 instead of the final standard. Therefore, aix_struct_return
10557 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10558 compatibility can change DRAFT_V4_STRUCT_RET to override the
10559 default, and -m switches get the final word. See
10560 rs6000_option_override_internal for more details.
10562 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10563 long double support is enabled. These values are returned in memory.
10565 int_size_in_bytes returns -1 for variable size objects, which go in
10566 memory always. The cast to unsigned makes -1 > 8. */
10569 rs6000_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
10571 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10573 && rs6000_darwin64_abi
10574 && TREE_CODE (type
) == RECORD_TYPE
10575 && int_size_in_bytes (type
) > 0)
10577 CUMULATIVE_ARGS valcum
;
10581 valcum
.fregno
= FP_ARG_MIN_REG
;
10582 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
10583 /* Do a trial code generation as if this were going to be passed
10584 as an argument; if any part goes in memory, we return NULL. */
10585 valret
= rs6000_darwin64_record_arg (&valcum
, type
, true, true);
10588 /* Otherwise fall through to more conventional ABI rules. */
10591 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
10592 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type
), type
,
10596 /* The ELFv2 ABI returns aggregates up to 16B in registers */
10597 if (DEFAULT_ABI
== ABI_ELFv2
&& AGGREGATE_TYPE_P (type
)
10598 && (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) <= 16)
10601 if (AGGREGATE_TYPE_P (type
)
10602 && (aix_struct_return
10603 || (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) > 8))
10606 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10607 modes only exist for GCC vector types if -maltivec. */
10608 if (TARGET_32BIT
&& !TARGET_ALTIVEC_ABI
10609 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type
)))
10612 /* Return synthetic vectors in memory. */
10613 if (TREE_CODE (type
) == VECTOR_TYPE
10614 && int_size_in_bytes (type
) > (TARGET_ALTIVEC_ABI
? 16 : 8))
10616 static bool warned_for_return_big_vectors
= false;
10617 if (!warned_for_return_big_vectors
)
10619 warning (OPT_Wpsabi
, "GCC vector returned by reference: "
10620 "non-standard ABI extension with no compatibility "
10622 warned_for_return_big_vectors
= true;
10627 if (DEFAULT_ABI
== ABI_V4
&& TARGET_IEEEQUAD
10628 && FLOAT128_IEEE_P (TYPE_MODE (type
)))
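/* Illustrative examples (assumptions about the ABI, not original text):
   under ELFv2

       struct s8  { long a; };          /- 8 bytes, returned in r3
       struct s16 { long a; long b; };  /- 16 bytes, returned in r3/r4
       struct s24 { long a, b, c; };    /- 24 bytes, returned in memory

   whereas under the AIX rules with aix_struct_return set, all three are
   returned in memory.  */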
10634 /* Specify whether values returned in registers should be at the most
10635 significant end of a register. We want aggregates returned by
10636 value to match the way aggregates are passed to functions. */
10639 rs6000_return_in_msb (const_tree valtype
)
10641 return (DEFAULT_ABI
== ABI_ELFv2
10642 && BYTES_BIG_ENDIAN
10643 && AGGREGATE_TYPE_P (valtype
)
10644 && (rs6000_function_arg_padding (TYPE_MODE (valtype
), valtype
)
10648 #ifdef HAVE_AS_GNU_ATTRIBUTE
10649 /* Return TRUE if a call to function FNDECL may be one that
10650 potentially affects the function calling ABI of the object file. */
10653 call_ABI_of_interest (tree fndecl
)
10655 if (rs6000_gnu_attr
&& symtab
->state
== EXPANSION
)
10657 struct cgraph_node
*c_node
;
10659 /* Libcalls are always interesting. */
10660 if (fndecl
== NULL_TREE
)
10663 /* Any call to an external function is interesting. */
10664 if (DECL_EXTERNAL (fndecl
))
10667 /* Interesting functions that we are emitting in this object file. */
10668 c_node
= cgraph_node::get (fndecl
);
10669 c_node
= c_node
->ultimate_alias_target ();
10670 return !c_node
->only_called_directly_p ();
10676 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10677 for a call to a function whose data type is FNTYPE.
10678 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10680 For incoming args we set the number of arguments in the prototype large
10681 so we never return a PARALLEL. */
10684 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
10685 rtx libname ATTRIBUTE_UNUSED
, int incoming
,
10686 int libcall
, int n_named_args
,
10688 machine_mode return_mode ATTRIBUTE_UNUSED
)
10690 static CUMULATIVE_ARGS zero_cumulative
;
10692 *cum
= zero_cumulative
;
10694 cum
->fregno
= FP_ARG_MIN_REG
;
10695 cum
->vregno
= ALTIVEC_ARG_MIN_REG
;
10696 cum
->prototype
= (fntype
&& prototype_p (fntype
));
10697 cum
->call_cookie
= ((DEFAULT_ABI
== ABI_V4
&& libcall
)
10698 ? CALL_LIBCALL
: CALL_NORMAL
);
10699 cum
->sysv_gregno
= GP_ARG_MIN_REG
;
10700 cum
->stdarg
= stdarg_p (fntype
);
10701 cum
->libcall
= libcall
;
10703 cum
->nargs_prototype
= 0;
10704 if (incoming
|| cum
->prototype
)
10705 cum
->nargs_prototype
= n_named_args
;
10707 /* Check for a longcall attribute. */
10708 if ((!fntype
&& rs6000_default_long_calls
)
10710 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype
))
10711 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype
))))
10712 cum
->call_cookie
|= CALL_LONG
;
10713 else if (DEFAULT_ABI
!= ABI_DARWIN
)
10715 bool is_local
= (fndecl
10716 && !DECL_EXTERNAL (fndecl
)
10717 && !DECL_WEAK (fndecl
)
10718 && (*targetm
.binds_local_p
) (fndecl
));
10724 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype
)))
10725 cum
->call_cookie
|= CALL_LONG
;
10730 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype
))))
10731 cum
->call_cookie
|= CALL_LONG
;
10735 if (TARGET_DEBUG_ARG
)
10737 fprintf (stderr
, "\ninit_cumulative_args:");
10740 tree ret_type
= TREE_TYPE (fntype
);
10741 fprintf (stderr
, " ret code = %s,",
10742 get_tree_code_name (TREE_CODE (ret_type
)));
10745 if (cum
->call_cookie
& CALL_LONG
)
10746 fprintf (stderr
, " longcall,");
10748 fprintf (stderr
, " proto = %d, nargs = %d\n",
10749 cum
->prototype
, cum
->nargs_prototype
);
10752 #ifdef HAVE_AS_GNU_ATTRIBUTE
10753 if (TARGET_ELF
&& (TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
))
10755 cum
->escapes
= call_ABI_of_interest (fndecl
);
10762 return_type
= TREE_TYPE (fntype
);
10763 return_mode
= TYPE_MODE (return_type
);
10766 return_type
= lang_hooks
.types
.type_for_mode (return_mode
, 0);
10768 if (return_type
!= NULL
)
10770 if (TREE_CODE (return_type
) == RECORD_TYPE
10771 && TYPE_TRANSPARENT_AGGR (return_type
))
10773 return_type
= TREE_TYPE (first_field (return_type
));
10774 return_mode
= TYPE_MODE (return_type
);
10776 if (AGGREGATE_TYPE_P (return_type
)
10777 && ((unsigned HOST_WIDE_INT
) int_size_in_bytes (return_type
)
10779 rs6000_returns_struct
= true;
10781 if (SCALAR_FLOAT_MODE_P (return_mode
))
10783 rs6000_passes_float
= true;
10784 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
)
10785 && (FLOAT128_IBM_P (return_mode
)
10786 || FLOAT128_IEEE_P (return_mode
)
10787 || (return_type
!= NULL
10788 && (TYPE_MAIN_VARIANT (return_type
)
10789 == long_double_type_node
))))
10790 rs6000_passes_long_double
= true;
/* Note if we pass or return an IEEE 128-bit type.  We changed
   the mangling for these types, and we may need to make an alias
   with the old mangling.  */
10795 if (FLOAT128_IEEE_P (return_mode
))
10796 rs6000_passes_ieee128
= true;
10798 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode
))
10799 rs6000_passes_vector
= true;
10806 && TARGET_ALTIVEC_ABI
10807 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype
))))
10809 error ("cannot return value in vector register because"
10810 " altivec instructions are disabled, use %qs"
10811 " to enable them", "-maltivec");
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}
10824 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10826 rs6000_offload_options (void)
10829 return xstrdup ("-foffload-abi=lp64");
10831 return xstrdup ("-foffload-abi=ilp32");
10834 /* On rs6000, function arguments are promoted, as are function return
10837 static machine_mode
10838 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED
,
10840 int *punsignedp ATTRIBUTE_UNUSED
,
10843 PROMOTE_MODE (mode
, *punsignedp
, type
);
10848 /* Return true if TYPE must be passed on the stack and not in registers. */
10851 rs6000_must_pass_in_stack (machine_mode mode
, const_tree type
)
10853 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
|| TARGET_64BIT
)
10854 return must_pass_in_stack_var_size (mode
, type
);
10856 return must_pass_in_stack_var_size_or_pad (mode
, type
);
10860 is_complex_IBM_long_double (machine_mode mode
)
10862 return mode
== ICmode
|| (mode
== TCmode
&& FLOAT128_IBM_P (TCmode
));
10865 /* Whether ABI_V4 passes MODE args to a function in floating point
10869 abi_v4_pass_in_fpr (machine_mode mode
, bool named
)
10871 if (!TARGET_HARD_FLOAT
)
10873 if (mode
== DFmode
)
10875 if (mode
== SFmode
&& named
)
10877 /* ABI_V4 passes complex IBM long double in 8 gprs.
10878 Stupid, but we can't change the ABI now. */
10879 if (is_complex_IBM_long_double (mode
))
10881 if (FLOAT128_2REG_P (mode
))
10883 if (DECIMAL_FLOAT_MODE_P (mode
))
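/* Illustrative sketch (an assumption): under ABI_V4 with hard float, a
   prototyped call

       void f (double d, float s);

   passes both D and S in FPRs, while an unnamed float passed through
   "..." fails the "SFmode && named" test above and travels in GPRs or
   on the stack instead.  */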
10888 /* Implement TARGET_FUNCTION_ARG_PADDING.
10890 For the AIX ABI structs are always stored left shifted in their
10893 static pad_direction
10894 rs6000_function_arg_padding (machine_mode mode
, const_tree type
)
10896 #ifndef AGGREGATE_PADDING_FIXED
10897 #define AGGREGATE_PADDING_FIXED 0
10899 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10900 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10903 if (!AGGREGATE_PADDING_FIXED
)
10905 /* GCC used to pass structures of the same size as integer types as
10906 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10907 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10908 passed padded downward, except that -mstrict-align further
10909 muddied the water in that multi-component structures of 2 and 4
10910 bytes in size were passed padded upward.
10912 The following arranges for best compatibility with previous
10913 versions of gcc, but removes the -mstrict-align dependency. */
10914 if (BYTES_BIG_ENDIAN
)
10916 HOST_WIDE_INT size
= 0;
10918 if (mode
== BLKmode
)
10920 if (type
&& TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
)
10921 size
= int_size_in_bytes (type
);
10924 size
= GET_MODE_SIZE (mode
);
10926 if (size
== 1 || size
== 2 || size
== 4)
10927 return PAD_DOWNWARD
;
10932 if (AGGREGATES_PAD_UPWARD_ALWAYS
)
10934 if (type
!= 0 && AGGREGATE_TYPE_P (type
))
10938 /* Fall back to the default. */
10939 return default_function_arg_padding (mode
, type
);
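/* Illustrative example (an assumption; the early-return cases are not
   all shown above): on a big-endian target with the default
   AGGREGATE_PADDING_FIXED of 0, a BLKmode struct of 3 bytes does not
   match the 1/2/4 test and so is padded upward, i.e. left-justified in
   its argument slot like an in-memory copy, whereas a 2-byte struct is
   padded downward.  */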
10942 /* If defined, a C expression that gives the alignment boundary, in bits,
10943 of an argument with the specified mode and type. If it is not defined,
10944 PARM_BOUNDARY is used for all arguments.
10946 V.4 wants long longs and doubles to be double word aligned. Just
10947 testing the mode size is a boneheaded way to do this as it means
10948 that other types such as complex int are also double word aligned.
10949 However, we're stuck with this because changing the ABI might break
10950 existing library interfaces.
10952 Quadword align Altivec/VSX vectors.
10953 Quadword align large synthetic vector types. */
10955 static unsigned int
10956 rs6000_function_arg_boundary (machine_mode mode
, const_tree type
)
10958 machine_mode elt_mode
;
10961 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
10963 if (DEFAULT_ABI
== ABI_V4
10964 && (GET_MODE_SIZE (mode
) == 8
10965 || (TARGET_HARD_FLOAT
10966 && !is_complex_IBM_long_double (mode
)
10967 && FLOAT128_2REG_P (mode
))))
10969 else if (FLOAT128_VECTOR_P (mode
))
10971 else if (type
&& TREE_CODE (type
) == VECTOR_TYPE
10972 && int_size_in_bytes (type
) >= 8
10973 && int_size_in_bytes (type
) < 16)
10975 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
10976 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
10977 && int_size_in_bytes (type
) >= 16))
10980 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10981 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10982 -mcompat-align-parm is used. */
10983 if (((DEFAULT_ABI
== ABI_AIX
&& !rs6000_compat_align_parm
)
10984 || DEFAULT_ABI
== ABI_ELFv2
)
10985 && type
&& TYPE_ALIGN (type
) > 64)
10987 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10988 or homogeneous float/vector aggregates here. We already handled
10989 vector aggregates above, but still need to check for float here. */
10990 bool aggregate_p
= (AGGREGATE_TYPE_P (type
)
10991 && !SCALAR_FLOAT_MODE_P (elt_mode
));
10993 /* We used to check for BLKmode instead of the above aggregate type
10994 check. Warn when this results in any difference to the ABI. */
10995 if (aggregate_p
!= (mode
== BLKmode
))
10997 static bool warned
;
10998 if (!warned
&& warn_psabi
)
11001 inform (input_location
,
11002 "the ABI of passing aggregates with %d-byte alignment"
11003 " has changed in GCC 5",
11004 (int) TYPE_ALIGN (type
) / BITS_PER_UNIT
);
11012 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11013 implement the "aggregate type" check as a BLKmode check here; this
11014 means certain aggregate types are in fact not aligned. */
11015 if (TARGET_MACHO
&& rs6000_darwin64_abi
11017 && type
&& TYPE_ALIGN (type
) > 64)
11020 return PARM_BOUNDARY
;
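/* Illustrative summary (assumptions about the cases above, whose return
   values are not all visible here): under ABI_V4 a "long long" or
   "double" argument reports a doubleword (64-bit) boundary, Altivec/VSX
   vectors and IEEE 128-bit floats report a quadword (128-bit) boundary,
   and most other scalars fall through to PARM_BOUNDARY.  */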
11023 /* The offset in words to the start of the parameter save area. */
11025 static unsigned int
11026 rs6000_parm_offset (void)
11028 return (DEFAULT_ABI
== ABI_V4
? 2
11029 : DEFAULT_ABI
== ABI_ELFv2
? 4
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
                   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
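/* Worked example (an assumption; PARM_BOUNDARY is 64 on 64-bit targets):
   under ELFv2 rs6000_parm_offset () is 4; if nwords is 3 and the new
   argument needs 16-byte alignment, align is 2 - 1 = 1 and the result is
   3 + (-(4 + 3) & 1) = 4, so the argument starts in the next even
   parameter word and stays 16-byte aligned in the save area.  */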
11046 /* Compute the size (in words) of a function argument. */
11048 static unsigned long
11049 rs6000_arg_size (machine_mode mode
, const_tree type
)
11051 unsigned long size
;
11053 if (mode
!= BLKmode
)
11054 size
= GET_MODE_SIZE (mode
);
11056 size
= int_size_in_bytes (type
);
11059 return (size
+ 3) >> 2;
11061 return (size
+ 7) >> 3;
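/* Illustrative example (an assumption): a 10-byte BLKmode argument
   occupies (10 + 3) >> 2 = 3 words when words are 4 bytes and
   (10 + 7) >> 3 = 2 words when words are 8 bytes.  */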
11064 /* Use this to flush pending int fields. */
11067 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS
*cum
,
11068 HOST_WIDE_INT bitpos
, int final
)
11070 unsigned int startbit
, endbit
;
11071 int intregs
, intoffset
;
11073 /* Handle the situations where a float is taking up the first half
11074 of the GPR, and the other half is empty (typically due to
11075 alignment restrictions). We can detect this by a 8-byte-aligned
11076 int field, or by seeing that this is the final flush for this
11077 argument. Count the word and continue on. */
11078 if (cum
->floats_in_gpr
== 1
11079 && (cum
->intoffset
% 64 == 0
11080 || (cum
->intoffset
== -1 && final
)))
11083 cum
->floats_in_gpr
= 0;
11086 if (cum
->intoffset
== -1)
11089 intoffset
= cum
->intoffset
;
11090 cum
->intoffset
= -1;
11091 cum
->floats_in_gpr
= 0;
11093 if (intoffset
% BITS_PER_WORD
!= 0)
11095 unsigned int bits
= BITS_PER_WORD
- intoffset
% BITS_PER_WORD
;
11096 if (!int_mode_for_size (bits
, 0).exists ())
11098 /* We couldn't find an appropriate mode, which happens,
11099 e.g., in packed structs when there are 3 bytes to load.
11100 Back intoffset back to the beginning of the word in this
11102 intoffset
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11106 startbit
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11107 endbit
= ROUND_UP (bitpos
, BITS_PER_WORD
);
11108 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
11109 cum
->words
+= intregs
;
11110 /* words should be unsigned. */
11111 if ((unsigned)cum
->words
< (endbit
/BITS_PER_WORD
))
11113 int pad
= (endbit
/BITS_PER_WORD
) - cum
->words
;
11118 /* The darwin64 ABI calls for us to recurse down through structs,
11119 looking for elements passed in registers. Unfortunately, we have
11120 to track int register count here also because of misalignments
11121 in powerpc alignment mode. */
11124 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS
*cum
,
11126 HOST_WIDE_INT startbitpos
)
11130 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
11131 if (TREE_CODE (f
) == FIELD_DECL
)
11133 HOST_WIDE_INT bitpos
= startbitpos
;
11134 tree ftype
= TREE_TYPE (f
);
11136 if (ftype
== error_mark_node
)
11138 mode
= TYPE_MODE (ftype
);
11140 if (DECL_SIZE (f
) != 0
11141 && tree_fits_uhwi_p (bit_position (f
)))
11142 bitpos
+= int_bit_position (f
);
11144 /* ??? FIXME: else assume zero offset. */
11146 if (TREE_CODE (ftype
) == RECORD_TYPE
)
11147 rs6000_darwin64_record_arg_advance_recurse (cum
, ftype
, bitpos
);
11148 else if (USE_FP_FOR_ARG_P (cum
, mode
))
11150 unsigned n_fpregs
= (GET_MODE_SIZE (mode
) + 7) >> 3;
11151 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
11152 cum
->fregno
+= n_fpregs
;
11153 /* Single-precision floats present a special problem for
11154 us, because they are smaller than an 8-byte GPR, and so
11155 the structure-packing rules combined with the standard
11156 varargs behavior mean that we want to pack float/float
11157 and float/int combinations into a single register's
11158 space. This is complicated by the arg advance flushing,
11159 which works on arbitrarily large groups of int-type
11161 if (mode
== SFmode
)
11163 if (cum
->floats_in_gpr
== 1)
11165 /* Two floats in a word; count the word and reset
11166 the float count. */
11168 cum
->floats_in_gpr
= 0;
11170 else if (bitpos
% 64 == 0)
/* A float at the beginning of an 8-byte word;
   count it and put off adjusting cum->words until
   we see if an arg advance flush is going to do it
11176 cum
->floats_in_gpr
++;
11180 /* The float is at the end of a word, preceded
11181 by integer fields, so the arg advance flush
11182 just above has already set cum->words and
11183 everything is taken care of. */
11187 cum
->words
+= n_fpregs
;
11189 else if (USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
11191 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
11195 else if (cum
->intoffset
== -1)
11196 cum
->intoffset
= bitpos
;
11200 /* Check for an item that needs to be considered specially under the darwin 64
11201 bit ABI. These are record types where the mode is BLK or the structure is
11202 8 bytes in size. */
11204 rs6000_darwin64_struct_check_p (machine_mode mode
, const_tree type
)
11206 return rs6000_darwin64_abi
11207 && ((mode
== BLKmode
11208 && TREE_CODE (type
) == RECORD_TYPE
11209 && int_size_in_bytes (type
) > 0)
11210 || (type
&& TREE_CODE (type
) == RECORD_TYPE
11211 && int_size_in_bytes (type
) == 8)) ? 1 : 0;
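/* Illustrative examples (assumptions): a BLKmode record such as
   struct { double d; double e; int i; } matches the first test, and an
   8-byte record such as struct { long l; } matches the second; anything
   else falls back to the ordinary argument-passing rules.  */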
11214 /* Update the data in CUM to advance over an argument
11215 of mode MODE and data type TYPE.
11216 (TYPE is null for libcalls where that information may not be available.)
11218 Note that for args passed by reference, function_arg will be called
11219 with MODE and TYPE set to that of the pointer to the arg, not the arg
11223 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS
*cum
, machine_mode mode
,
11224 const_tree type
, bool named
, int depth
)
11226 machine_mode elt_mode
;
11229 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
11231 /* Only tick off an argument if we're not recursing. */
11233 cum
->nargs_prototype
--;
11235 #ifdef HAVE_AS_GNU_ATTRIBUTE
11236 if (TARGET_ELF
&& (TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
11239 if (SCALAR_FLOAT_MODE_P (mode
))
11241 rs6000_passes_float
= true;
11242 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
)
11243 && (FLOAT128_IBM_P (mode
)
11244 || FLOAT128_IEEE_P (mode
)
11246 && TYPE_MAIN_VARIANT (type
) == long_double_type_node
)))
11247 rs6000_passes_long_double
= true;
/* Note if we pass or return an IEEE 128-bit type.  We changed the
   mangling for these types, and we may need to make an alias with
   the old mangling.  */
11252 if (FLOAT128_IEEE_P (mode
))
11253 rs6000_passes_ieee128
= true;
11255 if (named
&& ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
11256 rs6000_passes_vector
= true;
11260 if (TARGET_ALTIVEC_ABI
11261 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
11262 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11263 && int_size_in_bytes (type
) == 16)))
11265 bool stack
= false;
11267 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
11269 cum
->vregno
+= n_elts
;
11271 if (!TARGET_ALTIVEC
)
11272 error ("cannot pass argument in vector register because"
11273 " altivec instructions are disabled, use %qs"
11274 " to enable them", "-maltivec");
11276 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11277 even if it is going to be passed in a vector register.
11278 Darwin does the same for variable-argument functions. */
11279 if (((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
11281 || (cum
->stdarg
&& DEFAULT_ABI
!= ABI_V4
))
11291 /* Vector parameters must be 16-byte aligned. In 32-bit
11292 mode this means we need to take into account the offset
11293 to the parameter save area. In 64-bit mode, they just
11294 have to start on an even word, since the parameter save
11295 area is 16-byte aligned. */
11297 align
= -(rs6000_parm_offset () + cum
->words
) & 3;
11299 align
= cum
->words
& 1;
11300 cum
->words
+= align
+ rs6000_arg_size (mode
, type
);
11302 if (TARGET_DEBUG_ARG
)
11304 fprintf (stderr
, "function_adv: words = %2d, align=%d, ",
11305 cum
->words
, align
);
11306 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s\n",
11307 cum
->nargs_prototype
, cum
->prototype
,
11308 GET_MODE_NAME (mode
));
11312 else if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
11314 int size
= int_size_in_bytes (type
);
11315 /* Variable sized types have size == -1 and are
11316 treated as if consisting entirely of ints.
11317 Pad to 16 byte boundary if needed. */
11318 if (TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
11319 && (cum
->words
% 2) != 0)
11321 /* For varargs, we can just go up by the size of the struct. */
11323 cum
->words
+= (size
+ 7) / 8;
11326 /* It is tempting to say int register count just goes up by
11327 sizeof(type)/8, but this is wrong in a case such as
11328 { int; double; int; } [powerpc alignment]. We have to
11329 grovel through the fields for these too. */
11330 cum
->intoffset
= 0;
11331 cum
->floats_in_gpr
= 0;
11332 rs6000_darwin64_record_arg_advance_recurse (cum
, type
, 0);
11333 rs6000_darwin64_record_arg_advance_flush (cum
,
11334 size
* BITS_PER_UNIT
, 1);
11336 if (TARGET_DEBUG_ARG
)
11338 fprintf (stderr
, "function_adv: words = %2d, align=%d, size=%d",
11339 cum
->words
, TYPE_ALIGN (type
), size
);
11341 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11342 cum
->nargs_prototype
, cum
->prototype
,
11343 GET_MODE_NAME (mode
));
11346 else if (DEFAULT_ABI
== ABI_V4
)
11348 if (abi_v4_pass_in_fpr (mode
, named
))
11350 /* _Decimal128 must use an even/odd register pair. This assumes
11351 that the register number is odd when fregno is odd. */
11352 if (mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11355 if (cum
->fregno
+ (FLOAT128_2REG_P (mode
) ? 1 : 0)
11356 <= FP_ARG_V4_MAX_REG
)
11357 cum
->fregno
+= (GET_MODE_SIZE (mode
) + 7) >> 3;
11360 cum
->fregno
= FP_ARG_V4_MAX_REG
+ 1;
11361 if (mode
== DFmode
|| FLOAT128_IBM_P (mode
)
11362 || mode
== DDmode
|| mode
== TDmode
)
11363 cum
->words
+= cum
->words
& 1;
11364 cum
->words
+= rs6000_arg_size (mode
, type
);
11369 int n_words
= rs6000_arg_size (mode
, type
);
11370 int gregno
= cum
->sysv_gregno
;
11372 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11373 As does any other 2 word item such as complex int due to a
11374 historical mistake. */
11376 gregno
+= (1 - gregno
) & 1;
11378 /* Multi-reg args are not split between registers and stack. */
11379 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
11381 /* Long long is aligned on the stack. So are other 2 word
11382 items such as complex int due to a historical mistake. */
11384 cum
->words
+= cum
->words
& 1;
11385 cum
->words
+= n_words
;
11388 /* Note: continuing to accumulate gregno past when we've started
11389 spilling to the stack indicates the fact that we've started
11390 spilling to the stack to expand_builtin_saveregs. */
11391 cum
->sysv_gregno
= gregno
+ n_words
;
11394 if (TARGET_DEBUG_ARG
)
11396 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
11397 cum
->words
, cum
->fregno
);
11398 fprintf (stderr
, "gregno = %2d, nargs = %4d, proto = %d, ",
11399 cum
->sysv_gregno
, cum
->nargs_prototype
, cum
->prototype
);
11400 fprintf (stderr
, "mode = %4s, named = %d\n",
11401 GET_MODE_NAME (mode
), named
);
11406 int n_words
= rs6000_arg_size (mode
, type
);
11407 int start_words
= cum
->words
;
11408 int align_words
= rs6000_parm_start (mode
, type
, start_words
);
11410 cum
->words
= align_words
+ n_words
;
11412 if (SCALAR_FLOAT_MODE_P (elt_mode
) && TARGET_HARD_FLOAT
)
11414 /* _Decimal128 must be passed in an even/odd float register pair.
11415 This assumes that the register number is odd when fregno is
11417 if (elt_mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11419 cum
->fregno
+= n_elts
* ((GET_MODE_SIZE (elt_mode
) + 7) >> 3);
11422 if (TARGET_DEBUG_ARG
)
11424 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
11425 cum
->words
, cum
->fregno
);
11426 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s, ",
11427 cum
->nargs_prototype
, cum
->prototype
, GET_MODE_NAME (mode
));
11428 fprintf (stderr
, "named = %d, align = %d, depth = %d\n",
11429 named
, align_words
- start_words
, depth
);
11435 rs6000_function_arg_advance (cumulative_args_t cum
, machine_mode mode
,
11436 const_tree type
, bool named
)
11438 rs6000_function_arg_advance_1 (get_cumulative_args (cum
), mode
, type
, named
,
11442 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11443 structure between cum->intoffset and bitpos to integer registers. */
11446 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS
*cum
,
11447 HOST_WIDE_INT bitpos
, rtx rvec
[], int *k
)
11450 unsigned int regno
;
11451 unsigned int startbit
, endbit
;
11452 int this_regno
, intregs
, intoffset
;
11455 if (cum
->intoffset
== -1)
11458 intoffset
= cum
->intoffset
;
11459 cum
->intoffset
= -1;
11461 /* If this is the trailing part of a word, try to only load that
11462 much into the register. Otherwise load the whole register. Note
11463 that in the latter case we may pick up unwanted bits. It's not a
11464 problem at the moment but may wish to revisit. */
11466 if (intoffset
% BITS_PER_WORD
!= 0)
11468 unsigned int bits
= BITS_PER_WORD
- intoffset
% BITS_PER_WORD
;
11469 if (!int_mode_for_size (bits
, 0).exists (&mode
))
11471 /* We couldn't find an appropriate mode, which happens,
11472 e.g., in packed structs when there are 3 bytes to load.
11473 Back intoffset back to the beginning of the word in this
11475 intoffset
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11482 startbit
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11483 endbit
= ROUND_UP (bitpos
, BITS_PER_WORD
);
11484 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
11485 this_regno
= cum
->words
+ intoffset
/ BITS_PER_WORD
;
11487 if (intregs
> 0 && intregs
> GP_ARG_NUM_REG
- this_regno
)
11488 cum
->use_stack
= 1;
11490 intregs
= MIN (intregs
, GP_ARG_NUM_REG
- this_regno
);
11494 intoffset
/= BITS_PER_UNIT
;
11497 regno
= GP_ARG_MIN_REG
+ this_regno
;
11498 reg
= gen_rtx_REG (mode
, regno
);
11500 gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
11503 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
11507 while (intregs
> 0);
11510 /* Recursive workhorse for the following. */
11513 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS
*cum
, const_tree type
,
11514 HOST_WIDE_INT startbitpos
, rtx rvec
[],
11519 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
11520 if (TREE_CODE (f
) == FIELD_DECL
)
11522 HOST_WIDE_INT bitpos
= startbitpos
;
11523 tree ftype
= TREE_TYPE (f
);
11525 if (ftype
== error_mark_node
)
11527 mode
= TYPE_MODE (ftype
);
11529 if (DECL_SIZE (f
) != 0
11530 && tree_fits_uhwi_p (bit_position (f
)))
11531 bitpos
+= int_bit_position (f
);
11533 /* ??? FIXME: else assume zero offset. */
11535 if (TREE_CODE (ftype
) == RECORD_TYPE
)
11536 rs6000_darwin64_record_arg_recurse (cum
, ftype
, bitpos
, rvec
, k
);
11537 else if (cum
->named
&& USE_FP_FOR_ARG_P (cum
, mode
))
11539 unsigned n_fpreg
= (GET_MODE_SIZE (mode
) + 7) >> 3;
11543 case E_SCmode
: mode
= SFmode
; break;
11544 case E_DCmode
: mode
= DFmode
; break;
11545 case E_TCmode
: mode
= TFmode
; break;
11549 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
11550 if (cum
->fregno
+ n_fpreg
> FP_ARG_MAX_REG
+ 1)
11552 gcc_assert (cum
->fregno
== FP_ARG_MAX_REG
11553 && (mode
== TFmode
|| mode
== TDmode
));
11554 /* Long double or _Decimal128 split over regs and memory. */
11555 mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
: DFmode
;
11559 = gen_rtx_EXPR_LIST (VOIDmode
,
11560 gen_rtx_REG (mode
, cum
->fregno
++),
11561 GEN_INT (bitpos
/ BITS_PER_UNIT
));
11562 if (FLOAT128_2REG_P (mode
))
11565 else if (cum
->named
&& USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
11567 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
11569 = gen_rtx_EXPR_LIST (VOIDmode
,
11570 gen_rtx_REG (mode
, cum
->vregno
++),
11571 GEN_INT (bitpos
/ BITS_PER_UNIT
));
11573 else if (cum
->intoffset
== -1)
11574 cum
->intoffset
= bitpos
;
11578 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11579 the register(s) to be used for each field and subfield of a struct
11580 being passed by value, along with the offset of where the
11581 register's value may be found in the block. FP fields go in FP
11582 register, vector fields go in vector registers, and everything
11583 else goes in int registers, packed as in memory.
11585 This code is also used for function return values. RETVAL indicates
11586 whether this is the case.
11588 Much of this is taken from the SPARC V9 port, which has a similar
11589 calling convention. */
11592 rs6000_darwin64_record_arg (CUMULATIVE_ARGS
*orig_cum
, const_tree type
,
11593 bool named
, bool retval
)
11595 rtx rvec
[FIRST_PSEUDO_REGISTER
];
11596 int k
= 1, kbase
= 1;
11597 HOST_WIDE_INT typesize
= int_size_in_bytes (type
);
11598 /* This is a copy; modifications are not visible to our caller. */
CUMULATIVE_ARGS copy_cum = *orig_cum;
CUMULATIVE_ARGS *cum = &copy_cum;
11602 /* Pad to 16 byte boundary if needed. */
11603 if (!retval
&& TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
11604 && (cum
->words
% 2) != 0)
11607 cum
->intoffset
= 0;
11608 cum
->use_stack
= 0;
11609 cum
->named
= named
;
11611 /* Put entries into rvec[] for individual FP and vector fields, and
11612 for the chunks of memory that go in int regs. Note we start at
11613 element 1; 0 is reserved for an indication of using memory, and
11614 may or may not be filled in below. */
11615 rs6000_darwin64_record_arg_recurse (cum
, type
, /* startbit pos= */ 0, rvec
, &k
);
11616 rs6000_darwin64_record_arg_flush (cum
, typesize
* BITS_PER_UNIT
, rvec
, &k
);
11618 /* If any part of the struct went on the stack put all of it there.
11619 This hack is because the generic code for
11620 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11621 parts of the struct are not at the beginning. */
11622 if (cum
->use_stack
)
11625 return NULL_RTX
; /* doesn't go in registers at all */
11627 rvec
[0] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
11629 if (k
> 1 || cum
->use_stack
)
11630 return gen_rtx_PARALLEL (BLKmode
, gen_rtvec_v (k
- kbase
, &rvec
[kbase
]));
11635 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11638 rs6000_mixed_function_arg (machine_mode mode
, const_tree type
,
11643 rtx rvec
[GP_ARG_NUM_REG
+ 1];
11645 if (align_words
>= GP_ARG_NUM_REG
)
11648 n_units
= rs6000_arg_size (mode
, type
);
11650 /* Optimize the simple case where the arg fits in one gpr, except in
11651 the case of BLKmode due to assign_parms assuming that registers are
11652 BITS_PER_WORD wide. */
11654 || (n_units
== 1 && mode
!= BLKmode
))
11655 return gen_rtx_REG (mode
, GP_ARG_MIN_REG
+ align_words
);
11658 if (align_words
+ n_units
> GP_ARG_NUM_REG
)
11659 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11660 using a magic NULL_RTX component.
11661 This is not strictly correct. Only some of the arg belongs in
11662 memory, not all of it. However, the normal scheme using
11663 function_arg_partial_nregs can result in unusual subregs, eg.
11664 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11665 store the whole arg to memory is often more efficient than code
11666 to store pieces, and we know that space is available in the right
11667 place for the whole arg. */
11668 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
11673 rtx r
= gen_rtx_REG (SImode
, GP_ARG_MIN_REG
+ align_words
);
11674 rtx off
= GEN_INT (i
++ * 4);
11675 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
11677 while (++align_words
< GP_ARG_NUM_REG
&& --n_units
!= 0);
11679 return gen_rtx_PARALLEL (mode
, gen_rtvec_v (k
, rvec
));
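/* Illustrative note (an assumption): this path only matters for
   -m32 -mpowerpc64, where a DImode argument starting in the last GPR
   (r10) is returned as a PARALLEL holding one SImode register piece plus
   the NULL_RTX element that marks the half living in memory.  */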
11682 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11683 but must also be copied into the parameter save area starting at
11684 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11685 to the GPRs and/or memory. Return the number of elements used. */
11688 rs6000_psave_function_arg (machine_mode mode
, const_tree type
,
11689 int align_words
, rtx
*rvec
)
11693 if (align_words
< GP_ARG_NUM_REG
)
11695 int n_words
= rs6000_arg_size (mode
, type
);
11697 if (align_words
+ n_words
> GP_ARG_NUM_REG
11699 || (TARGET_32BIT
&& TARGET_POWERPC64
))
11701 /* If this is partially on the stack, then we only
11702 include the portion actually in registers here. */
11703 machine_mode rmode
= TARGET_32BIT
? SImode
: DImode
;
11706 if (align_words
+ n_words
> GP_ARG_NUM_REG
)
11708 /* Not all of the arg fits in gprs. Say that it goes in memory
11709 too, using a magic NULL_RTX component. Also see comment in
11710 rs6000_mixed_function_arg for why the normal
11711 function_arg_partial_nregs scheme doesn't work in this case. */
11712 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
11717 rtx r
= gen_rtx_REG (rmode
, GP_ARG_MIN_REG
+ align_words
);
11718 rtx off
= GEN_INT (i
++ * GET_MODE_SIZE (rmode
));
11719 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
11721 while (++align_words
< GP_ARG_NUM_REG
&& --n_words
!= 0);
11725 /* The whole arg fits in gprs. */
11726 rtx r
= gen_rtx_REG (mode
, GP_ARG_MIN_REG
+ align_words
);
11727 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, const0_rtx
);
11732 /* It's entirely in memory. */
11733 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
11739 /* RVEC is a vector of K components of an argument of mode MODE.
11740 Construct the final function_arg return value from it. */
11743 rs6000_finish_function_arg (machine_mode mode
, rtx
*rvec
, int k
)
11745 gcc_assert (k
>= 1);
11747 /* Avoid returning a PARALLEL in the trivial cases. */
11750 if (XEXP (rvec
[0], 0) == NULL_RTX
)
11753 if (GET_MODE (XEXP (rvec
[0], 0)) == mode
)
11754 return XEXP (rvec
[0], 0);
11757 return gen_rtx_PARALLEL (mode
, gen_rtvec_v (k
, rvec
));
11760 /* Determine where to put an argument to a function.
11761 Value is zero to push the argument on the stack,
11762 or a hard register in which to store the argument.
11764 MODE is the argument's machine mode.
11765 TYPE is the data type of the argument (as a tree).
11766 This is null for libcalls where that information may
11768 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11769 the preceding args and about the function being called. It is
11770 not modified in this routine.
11771 NAMED is nonzero if this argument is a named parameter
11772 (otherwise it is an extra parameter matching an ellipsis).
11774 On RS/6000 the first eight words of non-FP are normally in registers
11775 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11776 Under V.4, the first 8 FP args are in registers.
11778 If this is floating-point and no prototype is specified, we use
11779 both an FP and integer register (or possibly FP reg and stack). Library
11780 functions (when CALL_LIBCALL is set) always have the proper types for args,
11781 so we can pass the FP value just in one register. emit_library_function
11782 doesn't support PARALLEL anyway.
11784 Note that for args passed by reference, function_arg will be called
11785 with MODE and TYPE set to that of the pointer to the arg, not the arg
11789 rs6000_function_arg (cumulative_args_t cum_v
, machine_mode mode
,
11790 const_tree type
, bool named
)
11792 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
11793 enum rs6000_abi abi
= DEFAULT_ABI
;
11794 machine_mode elt_mode
;
11797 /* Return a marker to indicate whether CR1 needs to set or clear the
11798 bit that V.4 uses to say fp args were passed in registers.
11799 Assume that we don't need the marker for software floating point,
11800 or compiler generated library calls. */
11801 if (mode
== VOIDmode
)
11804 && (cum
->call_cookie
& CALL_LIBCALL
) == 0
11806 || (cum
->nargs_prototype
< 0
11807 && (cum
->prototype
|| TARGET_NO_PROTOTYPE
)))
11808 && TARGET_HARD_FLOAT
)
11809 return GEN_INT (cum
->call_cookie
11810 | ((cum
->fregno
== FP_ARG_MIN_REG
)
11811 ? CALL_V4_SET_FP_ARGS
11812 : CALL_V4_CLEAR_FP_ARGS
));
11814 return GEN_INT (cum
->call_cookie
& ~CALL_LIBCALL
);
11817 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
11819 if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
11821 rtx rslt
= rs6000_darwin64_record_arg (cum
, type
, named
, /*retval= */false);
11822 if (rslt
!= NULL_RTX
)
11824 /* Else fall through to usual handling. */
11827 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
11829 rtx rvec
[GP_ARG_NUM_REG
+ AGGR_ARG_NUM_REG
+ 1];
11833 /* Do we also need to pass this argument in the parameter save area?
11834 Library support functions for IEEE 128-bit are assumed to not need the
11835 value passed both in GPRs and in vector registers. */
11836 if (TARGET_64BIT
&& !cum
->prototype
11837 && (!cum
->libcall
|| !FLOAT128_VECTOR_P (elt_mode
)))
11839 int align_words
= ROUND_UP (cum
->words
, 2);
11840 k
= rs6000_psave_function_arg (mode
, type
, align_words
, rvec
);
11843 /* Describe where this argument goes in the vector registers. */
11844 for (i
= 0; i
< n_elts
&& cum
->vregno
+ i
<= ALTIVEC_ARG_MAX_REG
; i
++)
11846 r
= gen_rtx_REG (elt_mode
, cum
->vregno
+ i
);
11847 off
= GEN_INT (i
* GET_MODE_SIZE (elt_mode
));
11848 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
11851 return rs6000_finish_function_arg (mode
, rvec
, k
);
11853 else if (TARGET_ALTIVEC_ABI
11854 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode
)
11855 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11856 && int_size_in_bytes (type
) == 16)))
11858 if (named
|| abi
== ABI_V4
)
11862 /* Vector parameters to varargs functions under AIX or Darwin
11863 get passed in memory and possibly also in GPRs. */
11864 int align
, align_words
, n_words
;
11865 machine_mode part_mode
;
11867 /* Vector parameters must be 16-byte aligned. In 32-bit
11868 mode this means we need to take into account the offset
11869 to the parameter save area. In 64-bit mode, they just
11870 have to start on an even word, since the parameter save
11871 area is 16-byte aligned. */
11873 align
= -(rs6000_parm_offset () + cum
->words
) & 3;
11875 align
= cum
->words
& 1;
11876 align_words
= cum
->words
+ align
;
11878 /* Out of registers? Memory, then. */
11879 if (align_words
>= GP_ARG_NUM_REG
)
11882 if (TARGET_32BIT
&& TARGET_POWERPC64
)
11883 return rs6000_mixed_function_arg (mode
, type
, align_words
);
11885 /* The vector value goes in GPRs. Only the part of the
11886 value in GPRs is reported here. */
11888 n_words
= rs6000_arg_size (mode
, type
);
11889 if (align_words
+ n_words
> GP_ARG_NUM_REG
)
11890 /* Fortunately, there are only two possibilities, the value
11891 is either wholly in GPRs or half in GPRs and half not. */
11892 part_mode
= DImode
;
11894 return gen_rtx_REG (part_mode
, GP_ARG_MIN_REG
+ align_words
);
11898 else if (abi
== ABI_V4
)
11900 if (abi_v4_pass_in_fpr (mode
, named
))
11902 /* _Decimal128 must use an even/odd register pair. This assumes
11903 that the register number is odd when fregno is odd. */
11904 if (mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11907 if (cum
->fregno
+ (FLOAT128_2REG_P (mode
) ? 1 : 0)
11908 <= FP_ARG_V4_MAX_REG
)
11909 return gen_rtx_REG (mode
, cum
->fregno
);
11915 int n_words
= rs6000_arg_size (mode
, type
);
11916 int gregno
= cum
->sysv_gregno
;
11918 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11919 As does any other 2 word item such as complex int due to a
11920 historical mistake. */
11922 gregno
+= (1 - gregno
) & 1;
11924 /* Multi-reg args are not split between registers and stack. */
11925 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
11928 if (TARGET_32BIT
&& TARGET_POWERPC64
)
11929 return rs6000_mixed_function_arg (mode
, type
,
11930 gregno
- GP_ARG_MIN_REG
);
11931 return gen_rtx_REG (mode
, gregno
);
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
	{
	  rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
	  rtx r, off;
	  int i, k = 0;
	  unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	  int fpr_words;

	  /* Do we also need to pass this argument in the parameter
	     save area?  */
	  if (type && (cum->nargs_prototype <= 0
		       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
			   && TARGET_XL_COMPAT
			   && align_words >= GP_ARG_NUM_REG)))
	    k = rs6000_psave_function_arg (mode, type, align_words, rvec);

	  /* Describe where this argument goes in the fprs.  */
	  for (i = 0; i < n_elts
		      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
	    {
	      /* Check if the argument is split over registers and memory.
		 This can only ever happen for long double or _Decimal128;
		 complex types are handled via split_complex_arg.  */
	      machine_mode fmode = elt_mode;
	      if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
		{
		  gcc_assert (FLOAT128_2REG_P (fmode));
		  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
		}

	      r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
	      off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }

	  /* If there were not enough FPRs to hold the argument, the rest
	     usually goes into memory.  However, if the current position
	     is still within the register parameter area, a portion may
	     actually have to go into GPRs.

	     Note that it may happen that the portion of the argument
	     passed in the first "half" of the first GPR was already
	     passed in the last FPR as well.

	     For unnamed arguments, we already set up GPRs to cover the
	     whole argument in rs6000_psave_function_arg, so there is
	     nothing further to do at this point.  */
	  fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
	  if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
	      && cum->nargs_prototype > 0)
	    {
	      static bool warned;

	      machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	      int n_words = rs6000_arg_size (mode, type);

	      align_words += fpr_words;
	      n_words -= fpr_words;

	      do
		{
		  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
		  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
		  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
		}
	      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

	      if (!warned && warn_psabi)
		{
		  warned = true;
		  inform (input_location,
			  "the ABI of passing homogeneous float aggregates"
			  " has changed in GCC 5");
		}
	    }

	  return rs6000_finish_function_arg (mode, rvec, k);
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
	 memory) as well as VRs, we do not use the partial bytes mechanism;
	 instead, rs6000_function_arg will return a PARALLEL including a memory
	 element as necessary.  Library support functions for IEEE 128-bit are
	 assumed to not need the value passed both in GPRs and in vector
	 registers.  */
      if (TARGET_64BIT && !cum->prototype
	  && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
	return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
	ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as FPRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
	{
	  /* Compute number of bytes / words passed in FPRs.  If there
	     is still space available in the register parameter area
	     *after* that amount, a part of the argument will be passed
	     in GPRs.  In that case, the total amount passed in any
	     registers is equal to the amount that would have been passed
	     in GPRs if everything were passed there, so we fall back to
	     the GPR code below to compute the appropriate value.  */
	  int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
		     * MIN (8, GET_MODE_SIZE (elt_mode)));
	  int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

	  if (align_words + fpr_words < GP_ARG_NUM_REG)
	    passed_in_gprs = true;
	  else
	    ret = fpr;
	}
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
      return 1;
    }

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (OPT_Wpsabi, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility "
		   "guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
	    || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
			 TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
	result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
	   parm && parm != void_list_node;
	   parm = TREE_CHAIN (parm))
	if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
	  return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
	if (rs6000_parm_needs_stack (args_so_far, arg_type))
	  return true;
    }

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
	 a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
	reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
	reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }

	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = crtl->args.internal_arg_pointer;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	/* V4 va_list_gpr_size counts number of registers needed.  */
	n_gpr = cfun->va_list_gpr_size;
      else
	/* char * va_list instead counts number of bytes needed.  */
	n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
	nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (Pmode, save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
			     plus_constant (Pmode, save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			 TARGET_HARD_FLOAT ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			  TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
	rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple *stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types below.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (((TARGET_MACHO
	&& rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
	boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
	{
	  tree t;
	  /* This updates arg ptr by the amount that would be necessary
	     to align the zero-sized (but not zero-alignment) item.  */
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
	  gimplify_and_add (t, pre_p);

	  t = fold_convert (sizetype, valist_tmp);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_convert (TREE_TYPE (valist),
				    fold_build2 (BIT_AND_EXPR, sizetype, t,
						 size_int (-boundary))));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
	  gimplify_and_add (t, pre_p);
	}
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  int pad = 4 * rsize - size;
  align = 1;

  machine_mode mode = TYPE_MODE (type);
  if (abi_v4_pass_in_fpr (mode, false))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
      sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
      if (mode != SFmode && mode != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long is aligned in the registers.  As are any other 2 gpr
	 item such as complex int due to a historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && mode == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
	t = fold_build_pointer_plus_hwi (t, size);

      /* Args are passed right-aligned.  */
      if (BYTES_BIG_ENDIAN)
	t = fold_build_pointer_plus_hwi (t, pad);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has been taken care of for special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }

  /* Args are passed right-aligned.  */
  if (BYTES_BIG_ENDIAN)
    t = fold_build_pointer_plus_hwi (t, pad);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);
      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize * 4));
      TREE_ADDRESSABLE (tmp) = 1;

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error (input_location,
		 "internal error: builtin function %qs already processed",
		 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};
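
/* How the table above is populated (illustrative): each RS6000_BUILTIN_3
   entry in rs6000-builtin.def, for example a hypothetical

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_FOO, "__builtin_altivec_foo",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
		       CODE_FOR_foo)

   expands under the #define above to the initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_foo, "__builtin_altivec_foo",
       ALTIVEC_BUILTIN_FOO },

   while the other RS6000_BUILTIN_* classes expand to nothing, so bdesc_3arg
   contains only the ternary builtins.  The same trick builds the other
   bdesc_* tables below.  */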
/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};
/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};
/* ABS* operations.  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};
/* Simple no-argument operations: result = __builtin_darn_32 ()  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_0arg[] =
{
#include "rs6000-builtin.def"
};
/* HTM builtins.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

const char *
rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
{
  return rs6000_builtin_info[(int)fncode].name;
}
/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (icode == CODE_FOR_rs6000_mffsl
      && rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
    {
      error ("__builtin_mffsl() not supported with -msoft-float");
      return const0_rtx;
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (!pat)
    return 0;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
    {
      error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
      return const0_rtx;
    }

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  /* Only allow bit numbers 0 to 31.  */
  if (!u5bit_cint_operand (op0, VOIDmode))
    {
      error ("Argument must be a constant between 0 and 31.");
      return const0_rtx;
    }

  pat = GEN_FCN (icode) (op0);
  if (!pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode mode0 = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
    {
      error ("__builtin_set_fpscr_rn not supported with -msoft-float");
      return const0_rtx;
    }

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  /* If the argument is a constant, check the range.  Argument can only be a
     2-bit value.  Unfortunately, can't check the range of the value at
     compile time if the argument is a variable.  The least significant two
     bits of the argument, regardless of type, are used to set the rounding
     mode.  All other bits are ignored.  */
  if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand (op0, VOIDmode))
    {
      error ("Argument must be a value between 0 and 3.");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (op0);
  if (!pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode mode0 = insn_data[icode].operand[0].mode;

  if (TARGET_32BIT)
    /* Builtin not supported in 32-bit mode.  */
    fatal_error (input_location,
		 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");

  if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
    {
      error ("__builtin_set_fpscr_drn not supported with -msoft-float");
      return const0_rtx;
    }

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  /* If the argument is a constant, check the range.  Argument can only be a
     3-bit value.  Unfortunately, can't check the range of the value at
     compile time if the argument is a variable.  The least significant three
     bits of the argument, regardless of type, are used to set the rounding
     mode.  All other bits are ignored.  */
  if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand (op0, VOIDmode))
    {
      error ("Argument must be a value between 0 and 7.");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (op0);
  if (!pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_unpackv1ti
      || icode == CODE_FOR_unpackkf
      || icode == CODE_FOR_unpacktf
      || icode == CODE_FOR_unpackif
      || icode == CODE_FOR_unpacktd)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
	{
	  error ("argument 2 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_altivec_vspltw)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~3)
	{
	  error ("argument 2 must be a 2-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_altivec_vsplth)
    {
      /* Only allow 3-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~7)
	{
	  error ("argument 2 must be a 3-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_altivec_vspltb)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~15)
	{
	  error ("argument 2 must be a 4-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_altivec_vcfux
	   || icode == CODE_FOR_altivec_vcfsx
	   || icode == CODE_FOR_altivec_vctsxs
	   || icode == CODE_FOR_altivec_vctuxs)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfptstsfi_eq_dd
	   || icode == CODE_FOR_dfptstsfi_lt_dd
	   || icode == CODE_FOR_dfptstsfi_gt_dd
	   || icode == CODE_FOR_dfptstsfi_unordered_dd
	   || icode == CODE_FOR_dfptstsfi_eq_td
	   || icode == CODE_FOR_dfptstsfi_lt_td
	   || icode == CODE_FOR_dfptstsfi_gt_td
	   || icode == CODE_FOR_dfptstsfi_unordered_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
	{
	  error ("argument 1 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_xststdcqp_kf
	   || icode == CODE_FOR_xststdcqp_tf
	   || icode == CODE_FOR_xststdcdp
	   || icode == CODE_FOR_xststdcsp
	   || icode == CODE_FOR_xvtstdcdp
	   || icode == CODE_FOR_xvtstdcsp)
    {
      /* Only allow 7-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
	{
	  error ("argument 2 must be a 7-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = SImode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of %qs must be a constant",
	     "__builtin_altivec_predicate");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  /* Note that for many of the relevant operations (e.g. cmpne or
     cmpeq) with float or double operands, it makes more sense for the
     mode of the allocated scratch register to select a vector of
     integer.  But the choice to copy the mode of operand 0 was made
     long ago and there are no plans to change it.  */
  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of %qs is out of range",
	     "__builtin_altivec_predicate");
      break;
    }

  return target;
}
rtx
swap_endian_selector_for_mode (machine_mode mode)
{
  unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};

  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V1TImode:
    case E_V16QImode:
      swaparray = swap1;
      break;
    case E_V8HImode:
      swaparray = swap2;
      break;
    case E_V4SImode:
    case E_V4SFmode:
      swaparray = swap4;
      break;
    case E_V2DImode:
    case E_V2DFmode:
      swaparray = swap8;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
						     gen_rtvec_v (16, perm)));
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr, rawaddr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  /* For LVX, express the RTL accurately by ANDing the address with -16.
     LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_lvx_v1ti
      || icode == CODE_FOR_altivec_lvx_v2df
      || icode == CODE_FOR_altivec_lvx_v2di
      || icode == CODE_FOR_altivec_lvx_v4sf
      || icode == CODE_FOR_altivec_lvx_v4si
      || icode == CODE_FOR_altivec_lvx_v8hi
      || icode == CODE_FOR_altivec_lvx_v16qi)
    {
      if (op0 == const0_rtx)
	rawaddr = op1;
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
	}
      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);

      emit_insn (gen_rtx_SET (target, addr));
    }
  else
    {
      if (op0 == const0_rtx)
	addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			      gen_rtx_PLUS (Pmode, op1, op0));
	}

      pat = GEN_FCN (icode) (target, addr);
      if (! pat)
	return 0;
      emit_insn (pat);
    }

  return target;
}
static rtx
altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;
  machine_mode mode2 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return NULL_RTX;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return NULL_RTX;

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  pat = GEN_FCN (icode) (op0, op1, op2);
  if (pat)
    emit_insn (pat);

  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr, rawaddr;
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode smode = insn_data[icode].operand[1].mode;
  machine_mode mode1 = Pmode;
  machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  op2 = copy_to_mode_reg (mode2, op2);

  /* For STVX, express the RTL accurately by ANDing the address with -16.
     STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_stvx_v2df
      || icode == CODE_FOR_altivec_stvx_v2di
      || icode == CODE_FOR_altivec_stvx_v4sf
      || icode == CODE_FOR_altivec_stvx_v4si
      || icode == CODE_FOR_altivec_stvx_v8hi
      || icode == CODE_FOR_altivec_stvx_v16qi)
    {
      if (op1 == const0_rtx)
	rawaddr = op2;
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
	}

      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (tmode, addr);

      op0 = copy_to_mode_reg (tmode, op0);

      emit_insn (gen_rtx_SET (addr, op0));
    }
  else
    {
      if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
	op0 = copy_to_mode_reg (smode, op0);

      if (op1 == const0_rtx)
	addr = gen_rtx_MEM (tmode, op2);
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
	}

      pat = GEN_FCN (icode) (addr, op0);
      if (pat)
	emit_insn (pat);
    }

  return NULL_RTX;
}
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
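/* Reading aid (assumption, not authoritative): a "get" HTM builtin such as
   __builtin_get_texasr is expected to expand through the mfspr pattern
   returned above, with htm_spr_num () supplying the SPR-number operand and
   htm_spr_regno () the hard register used by the insn, while the "set"
   builtins go through the corresponding mtspr pattern.  */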
/* Expand the HTM builtin in EXP and store the result in TARGET.
   Store true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
htm_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  if (!TARGET_POWERPC64
      && (fcode == HTM_BUILTIN_TABORTDC
	  || fcode == HTM_BUILTIN_TABORTDCI))
    {
      size_t uns_fcode = (size_t)fcode;
      const char *name = rs6000_builtin_info[uns_fcode].name;
      error ("builtin %qs is only valid in 64-bit mode", name);
      return const0_rtx;
    }

  /* Expand the HTM builtins.  */
  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    if (d->code == fcode)
      {
	rtx op[MAX_HTM_OPERANDS], pat;
	int nopnds = 0;
	tree arg;
	call_expr_arg_iterator iter;
	unsigned attr = rs6000_builtin_info[fcode].attr;
	enum insn_code icode = d->icode;
	const struct insn_operand_data *insn_op;
	bool uses_spr = (attr & RS6000_BTC_SPR);
	rtx cr = NULL_RTX;

	if (uses_spr)
	  icode = rs6000_htm_spr_icode (nonvoid);
	insn_op = &insn_data[icode].operand[0];

	if (nonvoid)
	  {
	    machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
	    if (!target
		|| GET_MODE (target) != tmode
		|| (uses_spr && !(*insn_op->predicate) (target, tmode)))
	      target = gen_reg_rtx (tmode);
	    if (uses_spr)
	      op[nopnds++] = target;
	  }

	FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
	  {
	    if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
	      return const0_rtx;

	    insn_op = &insn_data[icode].operand[nopnds];

	    op[nopnds] = expand_normal (arg);

	    if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
	      {
		if (!strcmp (insn_op->constraint, "n"))
		  {
		    int arg_num = (nonvoid) ? nopnds : nopnds + 1;
		    if (!CONST_INT_P (op[nopnds]))
		      error ("argument %d must be an unsigned literal", arg_num);
		    else
		      error ("argument %d is an unsigned literal that is "
			     "out of range", arg_num);
		    return const0_rtx;
		  }
		op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
	      }

	    nopnds++;
	  }

	/* Handle the builtins for extended mnemonics.  These accept
	   no arguments, but map to builtins that take arguments.  */
	switch (fcode)
	  {
	  case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
	  case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
	    op[nopnds++] = GEN_INT (1);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
	    op[nopnds++] = GEN_INT (0);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  default:
	    break;
	  }

	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number and SPR regno as the last two operands.  */
	if (uses_spr)
	  {
	    machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
	    op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
	    op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
	  }
	/* If this builtin accesses a CR, then pass in a scratch
	   CR as the last operand.  */
	else if (attr & RS6000_BTC_CR)
	  { cr = gen_reg_rtx (CCmode);
	    op[nopnds++] = cr;
	  }

	if (flag_checking)
	  {
	    int expected_nopnds = 0;
	    if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
	      expected_nopnds = 1;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
	      expected_nopnds = 2;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
	      expected_nopnds = 3;
	    if (!(attr & RS6000_BTC_VOID))
	      expected_nopnds += 1;
	    if (uses_spr)
	      expected_nopnds += 2;

	    gcc_assert (nopnds == expected_nopnds
			&& nopnds <= MAX_HTM_OPERANDS);
	  }

	switch (nopnds)
	  {
	  case 1:
	    pat = GEN_FCN (icode) (op[0]);
	    break;
	  case 2:
	    pat = GEN_FCN (icode) (op[0], op[1]);
	    break;
	  case 3:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	    break;
	  case 4:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	    break;
	  default:
	    gcc_unreachable ();
	  }
	if (!pat)
	  return NULL_RTX;
	emit_insn (pat);

	if (attr & RS6000_BTC_CR)
	  {
	    if (fcode == HTM_BUILTIN_TBEGIN)
	      {
		/* Emit code to set TARGET to true or false depending on
		   whether the tbegin. instruction successfully or failed
		   to start a transaction.  We do this by placing the 1's
		   complement of CR's EQ bit into TARGET.  */
		rtx scratch = gen_reg_rtx (SImode);
		emit_insn (gen_rtx_SET (scratch,
					gen_rtx_EQ (SImode, cr,
						    const0_rtx)));
		emit_insn (gen_rtx_SET (target,
					gen_rtx_XOR (SImode, scratch,
						     GEN_INT (1))));
	      }
	    else
	      {
		/* Emit code to copy the 4-bit condition register field
		   CR into the least significant end of register TARGET.  */
		rtx scratch1 = gen_reg_rtx (SImode);
		rtx scratch2 = gen_reg_rtx (SImode);
		rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
		emit_insn (gen_movcc (subreg, cr));
		emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
		emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
	      }
	  }

	if (nonvoid)
	  return target;
	return const0_rtx;
      }

  *expandedp = false;
  return NULL_RTX;
}
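/* Illustrative user-level view (sketch only, not authoritative): for the
   CR-using case above, a call such as

     if (__builtin_tbegin (0))
       { ... transactional path ... }
     else
       { ... fallback path ... }

   tests the complemented EQ bit that the expander places in TARGET, so the
   builtin reads as "true if the transaction started".  */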
/* Expand the CPU builtin in FCODE and store the result in TARGET.  */

static rtx
cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
		    rtx target)
{
  /* __builtin_cpu_init () is a nop, so expand to nothing.  */
  if (fcode == RS6000_BUILTIN_CPU_INIT)
    return const0_rtx;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
  /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
     to a STRING_CST.  */
  if (TREE_CODE (arg) == ARRAY_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
      && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
      && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
    arg = TREE_OPERAND (arg, 0);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("builtin %qs only accepts a string argument",
	     rs6000_builtin_info[(size_t) fcode].name);
      return const0_rtx;
    }

  if (fcode == RS6000_BUILTIN_CPU_IS)
    {
      const char *cpu = TREE_STRING_POINTER (arg);
      rtx cpuid = NULL_RTX;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
	if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
	  {
	    /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM.  */
	    cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
	    break;
	  }
      if (cpuid == NULL_RTX)
	{
	  /* Invalid CPU argument.  */
	  error ("cpu %qs is an invalid argument to builtin %qs",
		 cpu, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx platform = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (TCB_PLATFORM_OFFSET)));
      emit_move_insn (platform, tcbmem);
      emit_insn (gen_eqsi3 (target, platform, cpuid));
    }
  else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
    {
      const char *hwcap = TREE_STRING_POINTER (arg);
      rtx mask = NULL_RTX;
      int hwcap_offset;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
	if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
	  {
	    mask = GEN_INT (cpu_supports_info[i].mask);
	    hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
	    break;
	  }
      if (mask == NULL_RTX)
	{
	  /* Invalid HWCAP argument.  */
	  error ("%s %qs is an invalid argument to builtin %qs",
		 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx tcb_hwcap = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (hwcap_offset)));
      emit_move_insn (tcb_hwcap, tcbmem);
      rtx scratch1 = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
      rtx scratch2 = gen_reg_rtx (SImode);
      emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
      emit_insn (gen_rtx_SET (target,
			      gen_rtx_XOR (SImode, scratch2, const1_rtx)));
    }
  else
    gcc_unreachable ();

  /* Record that we have expanded a CPU builtin, so that we can later
     emit a reference to the special symbol exported by LIBC to ensure we
     do not link against an old LIBC that doesn't support this feature.  */
  cpu_builtin_p = true;

#else
  warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
	   "capability bits", rs6000_builtin_info[(size_t) fcode].name);

  /* For old LIBCs, always return FALSE.  */
  emit_move_insn (target, GEN_INT (0));
#endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */

  return target;
}
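/* Illustrative usage (sketch only): the two builtins handled above are the
   user-visible spellings

     if (__builtin_cpu_is ("power9"))
       ...;
     if (__builtin_cpu_supports ("vsx"))
       ...;

   which, with a glibc that provides HWCAP in the TCB, compile to a load from
   the TCB plus a compare or mask test as emitted above; the string arguments
   must match entries in cpu_is_info[] / cpu_supports_info[].  */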
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v2df
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxpermdi_v2df_be
	   || icode == CODE_FOR_vsx_xxpermdi_v2di_be
	   || icode == CODE_FOR_vsx_xxpermdi_v1ti
	   || icode == CODE_FOR_vsx_xxpermdi_v4sf
	   || icode == CODE_FOR_vsx_xxpermdi_v4si
	   || icode == CODE_FOR_vsx_xxpermdi_v8hi
	   || icode == CODE_FOR_vsx_xxpermdi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di
	   || icode == CODE_FOR_bcdadd
	   || icode == CODE_FOR_bcdadd_lt
	   || icode == CODE_FOR_bcdadd_eq
	   || icode == CODE_FOR_bcdadd_gt
	   || icode == CODE_FOR_bcdsub
	   || icode == CODE_FOR_bcdsub_lt
	   || icode == CODE_FOR_bcdsub_eq
	   || icode == CODE_FOR_bcdsub_gt)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_ddedpd_dd
	   || icode == CODE_FOR_dfp_ddedpd_td)
    {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 1 must be 0 or 2");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_denbcd_dd
	   || icode == CODE_FOR_dfp_denbcd_td)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x1)
	{
	  error ("argument 1 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_dscli_dd
	   || icode == CODE_FOR_dfp_dscli_td
	   || icode == CODE_FOR_dfp_dscri_dd
	   || icode == CODE_FOR_dfp_dscri_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x3f)
	{
	  error ("argument 2 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_crypto_vshasigmaw
	   || icode == CODE_FOR_crypto_vshasigmad)
    {
      /* Check whether the 2nd and 3rd arguments are integer constants and in
	 range and prepare arguments.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
	{
	  error ("argument 2 must be 0 or 1");
	  return CONST0_RTX (tmode);
	}

      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || wi::geu_p (wi::to_wide (arg2), 16))
	{
	  error ("argument 3 must be in the range 0..15");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  machine_mode tmode = TYPE_MODE (type);
  machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
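/* Example of the check above (illustration): for a 4-element vector type,
   TYPE_VECTOR_SUBPARTS is 4, so max is 3 and only selectors 0..3 are
   accepted; the vec_set / vec_ext expanders below would reject a larger
   constant element number with the "range 0..3" diagnostic.  */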
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  machine_mode tmode, mode0;
  tree arg0, arg1;
  rtx op0;
  rtx op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  /* Call get_element_number to validate arg1 if it is a constant.  */
  if (TREE_CODE (arg1) == INTEGER_CST)
    (void) get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, op1);

  return target;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2;
  rtx op0, pat;
  machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
14724 case ALTIVEC_BUILTIN_STVX_V2DF
:
14725 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df
, exp
);
14726 case ALTIVEC_BUILTIN_STVX_V2DI
:
14727 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di
, exp
);
14728 case ALTIVEC_BUILTIN_STVX_V4SF
:
14729 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf
, exp
);
14730 case ALTIVEC_BUILTIN_STVX
:
14731 case ALTIVEC_BUILTIN_STVX_V4SI
:
14732 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si
, exp
);
14733 case ALTIVEC_BUILTIN_STVX_V8HI
:
14734 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi
, exp
);
14735 case ALTIVEC_BUILTIN_STVX_V16QI
:
14736 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi
, exp
);
14737 case ALTIVEC_BUILTIN_STVEBX
:
14738 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx
, exp
);
14739 case ALTIVEC_BUILTIN_STVEHX
:
14740 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx
, exp
);
14741 case ALTIVEC_BUILTIN_STVEWX
:
14742 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx
, exp
);
14743 case ALTIVEC_BUILTIN_STVXL_V2DF
:
14744 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df
, exp
);
14745 case ALTIVEC_BUILTIN_STVXL_V2DI
:
14746 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di
, exp
);
14747 case ALTIVEC_BUILTIN_STVXL_V4SF
:
14748 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf
, exp
);
14749 case ALTIVEC_BUILTIN_STVXL
:
14750 case ALTIVEC_BUILTIN_STVXL_V4SI
:
14751 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si
, exp
);
14752 case ALTIVEC_BUILTIN_STVXL_V8HI
:
14753 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi
, exp
);
14754 case ALTIVEC_BUILTIN_STVXL_V16QI
:
14755 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi
, exp
);
14757 case ALTIVEC_BUILTIN_STVLX
:
14758 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx
, exp
);
14759 case ALTIVEC_BUILTIN_STVLXL
:
14760 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl
, exp
);
14761 case ALTIVEC_BUILTIN_STVRX
:
14762 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx
, exp
);
14763 case ALTIVEC_BUILTIN_STVRXL
:
14764 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl
, exp
);
14766 case P9V_BUILTIN_STXVL
:
14767 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl
, exp
);
14769 case P9V_BUILTIN_XST_LEN_R
:
14770 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r
, exp
);
14772 case VSX_BUILTIN_STXVD2X_V1TI
:
14773 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti
, exp
);
14774 case VSX_BUILTIN_STXVD2X_V2DF
:
14775 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df
, exp
);
14776 case VSX_BUILTIN_STXVD2X_V2DI
:
14777 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di
, exp
);
14778 case VSX_BUILTIN_STXVW4X_V4SF
:
14779 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf
, exp
);
14780 case VSX_BUILTIN_STXVW4X_V4SI
:
14781 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si
, exp
);
14782 case VSX_BUILTIN_STXVW4X_V8HI
:
14783 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi
, exp
);
14784 case VSX_BUILTIN_STXVW4X_V16QI
:
14785 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi
, exp
);
14787 /* For the following on big endian, it's ok to use any appropriate
14788 unaligned-supporting store, so use a generic expander. For
14789 little-endian, the exact element-reversing instruction must
14791 case VSX_BUILTIN_ST_ELEMREV_V1TI
:
14793 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v1ti
14794 : CODE_FOR_vsx_st_elemrev_v1ti
);
14795 return altivec_expand_stv_builtin (code
, exp
);
14797 case VSX_BUILTIN_ST_ELEMREV_V2DF
:
14799 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2df
14800 : CODE_FOR_vsx_st_elemrev_v2df
);
14801 return altivec_expand_stv_builtin (code
, exp
);
14803 case VSX_BUILTIN_ST_ELEMREV_V2DI
:
14805 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2di
14806 : CODE_FOR_vsx_st_elemrev_v2di
);
14807 return altivec_expand_stv_builtin (code
, exp
);
14809 case VSX_BUILTIN_ST_ELEMREV_V4SF
:
14811 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4sf
14812 : CODE_FOR_vsx_st_elemrev_v4sf
);
14813 return altivec_expand_stv_builtin (code
, exp
);
14815 case VSX_BUILTIN_ST_ELEMREV_V4SI
:
14817 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4si
14818 : CODE_FOR_vsx_st_elemrev_v4si
);
14819 return altivec_expand_stv_builtin (code
, exp
);
14821 case VSX_BUILTIN_ST_ELEMREV_V8HI
:
14823 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v8hi
14824 : CODE_FOR_vsx_st_elemrev_v8hi
);
14825 return altivec_expand_stv_builtin (code
, exp
);
14827 case VSX_BUILTIN_ST_ELEMREV_V16QI
:
14829 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v16qi
14830 : CODE_FOR_vsx_st_elemrev_v16qi
);
14831 return altivec_expand_stv_builtin (code
, exp
);
14834 case ALTIVEC_BUILTIN_MFVSCR
:
14835 icode
= CODE_FOR_altivec_mfvscr
;
14836 tmode
= insn_data
[icode
].operand
[0].mode
;
14839 || GET_MODE (target
) != tmode
14840 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14841 target
= gen_reg_rtx (tmode
);
14843 pat
= GEN_FCN (icode
) (target
);
14849 case ALTIVEC_BUILTIN_MTVSCR
:
14850 icode
= CODE_FOR_altivec_mtvscr
;
14851 arg0
= CALL_EXPR_ARG (exp
, 0);
14852 op0
= expand_normal (arg0
);
14853 mode0
= insn_data
[icode
].operand
[0].mode
;
14855 /* If we got invalid arguments bail out before generating bad rtl. */
14856 if (arg0
== error_mark_node
)
14859 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
14860 op0
= copy_to_mode_reg (mode0
, op0
);
14862 pat
= GEN_FCN (icode
) (op0
);
14867 case ALTIVEC_BUILTIN_DSSALL
:
14868 emit_insn (gen_altivec_dssall ());
14871 case ALTIVEC_BUILTIN_DSS
:
14872 icode
= CODE_FOR_altivec_dss
;
14873 arg0
= CALL_EXPR_ARG (exp
, 0);
14875 op0
= expand_normal (arg0
);
14876 mode0
= insn_data
[icode
].operand
[0].mode
;
14878 /* If we got invalid arguments bail out before generating bad rtl. */
14879 if (arg0
== error_mark_node
)
14882 if (TREE_CODE (arg0
) != INTEGER_CST
14883 || TREE_INT_CST_LOW (arg0
) & ~0x3)
14885 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14889 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
14890 op0
= copy_to_mode_reg (mode0
, op0
);
14892 emit_insn (gen_altivec_dss (op0
));
14895 case ALTIVEC_BUILTIN_VEC_INIT_V4SI
:
14896 case ALTIVEC_BUILTIN_VEC_INIT_V8HI
:
14897 case ALTIVEC_BUILTIN_VEC_INIT_V16QI
:
14898 case ALTIVEC_BUILTIN_VEC_INIT_V4SF
:
14899 case VSX_BUILTIN_VEC_INIT_V2DF
:
14900 case VSX_BUILTIN_VEC_INIT_V2DI
:
14901 case VSX_BUILTIN_VEC_INIT_V1TI
:
14902 return altivec_expand_vec_init_builtin (TREE_TYPE (exp
), exp
, target
);
14904 case ALTIVEC_BUILTIN_VEC_SET_V4SI
:
14905 case ALTIVEC_BUILTIN_VEC_SET_V8HI
:
14906 case ALTIVEC_BUILTIN_VEC_SET_V16QI
:
14907 case ALTIVEC_BUILTIN_VEC_SET_V4SF
:
14908 case VSX_BUILTIN_VEC_SET_V2DF
:
14909 case VSX_BUILTIN_VEC_SET_V2DI
:
14910 case VSX_BUILTIN_VEC_SET_V1TI
:
14911 return altivec_expand_vec_set_builtin (exp
);
14913 case ALTIVEC_BUILTIN_VEC_EXT_V4SI
:
14914 case ALTIVEC_BUILTIN_VEC_EXT_V8HI
:
14915 case ALTIVEC_BUILTIN_VEC_EXT_V16QI
:
14916 case ALTIVEC_BUILTIN_VEC_EXT_V4SF
:
14917 case VSX_BUILTIN_VEC_EXT_V2DF
:
14918 case VSX_BUILTIN_VEC_EXT_V2DI
:
14919 case VSX_BUILTIN_VEC_EXT_V1TI
:
14920 return altivec_expand_vec_ext_builtin (exp
, target
);
14922 case P9V_BUILTIN_VEC_EXTRACT4B
:
14923 arg1
= CALL_EXPR_ARG (exp
, 1);
14926 /* Generate a normal call if it is invalid. */
14927 if (arg1
== error_mark_node
)
14928 return expand_call (exp
, target
, false);
14930 if (TREE_CODE (arg1
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg1
) > 12)
14932 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14933 return expand_call (exp
, target
, false);
14937 case P9V_BUILTIN_VEC_INSERT4B
:
14938 arg2
= CALL_EXPR_ARG (exp
, 2);
14941 /* Generate a normal call if it is invalid. */
14942 if (arg2
== error_mark_node
)
14943 return expand_call (exp
, target
, false);
14945 if (TREE_CODE (arg2
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2
) > 12)
14947 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14948 return expand_call (exp
, target
, false);
14954 /* Fall through. */
14957 /* Expand abs* operations. */
14959 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
14960 if (d
->code
== fcode
)
14961 return altivec_expand_abs_builtin (d
->icode
, exp
, target
);
14963 /* Expand the AltiVec predicates. */
14964 d
= bdesc_altivec_preds
;
14965 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
14966 if (d
->code
== fcode
)
14967 return altivec_expand_predicate_builtin (d
->icode
, exp
, target
);
14969 /* LV* are funky. We initialized them differently. */
14972 case ALTIVEC_BUILTIN_LVSL
:
14973 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl
,
14974 exp
, target
, false);
14975 case ALTIVEC_BUILTIN_LVSR
:
14976 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr
,
14977 exp
, target
, false);
14978 case ALTIVEC_BUILTIN_LVEBX
:
14979 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx
,
14980 exp
, target
, false);
14981 case ALTIVEC_BUILTIN_LVEHX
:
14982 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx
,
14983 exp
, target
, false);
14984 case ALTIVEC_BUILTIN_LVEWX
:
14985 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx
,
14986 exp
, target
, false);
14987 case ALTIVEC_BUILTIN_LVXL_V2DF
:
14988 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df
,
14989 exp
, target
, false);
14990 case ALTIVEC_BUILTIN_LVXL_V2DI
:
14991 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di
,
14992 exp
, target
, false);
14993 case ALTIVEC_BUILTIN_LVXL_V4SF
:
14994 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf
,
14995 exp
, target
, false);
14996 case ALTIVEC_BUILTIN_LVXL
:
14997 case ALTIVEC_BUILTIN_LVXL_V4SI
:
14998 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si
,
14999 exp
, target
, false);
15000 case ALTIVEC_BUILTIN_LVXL_V8HI
:
15001 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi
,
15002 exp
, target
, false);
15003 case ALTIVEC_BUILTIN_LVXL_V16QI
:
15004 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi
,
15005 exp
, target
, false);
15006 case ALTIVEC_BUILTIN_LVX_V1TI
:
15007 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti
,
15008 exp
, target
, false);
15009 case ALTIVEC_BUILTIN_LVX_V2DF
:
15010 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df
,
15011 exp
, target
, false);
15012 case ALTIVEC_BUILTIN_LVX_V2DI
:
15013 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di
,
15014 exp
, target
, false);
15015 case ALTIVEC_BUILTIN_LVX_V4SF
:
15016 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf
,
15017 exp
, target
, false);
15018 case ALTIVEC_BUILTIN_LVX
:
15019 case ALTIVEC_BUILTIN_LVX_V4SI
:
15020 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si
,
15021 exp
, target
, false);
15022 case ALTIVEC_BUILTIN_LVX_V8HI
:
15023 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi
,
15024 exp
, target
, false);
15025 case ALTIVEC_BUILTIN_LVX_V16QI
:
15026 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi
,
15027 exp
, target
, false);
15028 case ALTIVEC_BUILTIN_LVLX
:
15029 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx
,
15030 exp
, target
, true);
15031 case ALTIVEC_BUILTIN_LVLXL
:
15032 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl
,
15033 exp
, target
, true);
15034 case ALTIVEC_BUILTIN_LVRX
:
15035 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx
,
15036 exp
, target
, true);
15037 case ALTIVEC_BUILTIN_LVRXL
:
15038 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl
,
15039 exp
, target
, true);
15040 case VSX_BUILTIN_LXVD2X_V1TI
:
15041 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti
,
15042 exp
, target
, false);
15043 case VSX_BUILTIN_LXVD2X_V2DF
:
15044 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df
,
15045 exp
, target
, false);
15046 case VSX_BUILTIN_LXVD2X_V2DI
:
15047 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di
,
15048 exp
, target
, false);
15049 case VSX_BUILTIN_LXVW4X_V4SF
:
15050 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf
,
15051 exp
, target
, false);
15052 case VSX_BUILTIN_LXVW4X_V4SI
:
15053 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si
,
15054 exp
, target
, false);
15055 case VSX_BUILTIN_LXVW4X_V8HI
:
15056 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi
,
15057 exp
, target
, false);
15058 case VSX_BUILTIN_LXVW4X_V16QI
:
15059 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi
,
15060 exp
, target
, false);
15061 /* For the following on big endian, it's ok to use any appropriate
15062 unaligned-supporting load, so use a generic expander. For
15063 little-endian, the exact element-reversing instruction must
15065 case VSX_BUILTIN_LD_ELEMREV_V2DF
:
15067 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v2df
15068 : CODE_FOR_vsx_ld_elemrev_v2df
);
15069 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15071 case VSX_BUILTIN_LD_ELEMREV_V1TI
:
15073 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v1ti
15074 : CODE_FOR_vsx_ld_elemrev_v1ti
);
15075 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15077 case VSX_BUILTIN_LD_ELEMREV_V2DI
:
15079 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v2di
15080 : CODE_FOR_vsx_ld_elemrev_v2di
);
15081 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15083 case VSX_BUILTIN_LD_ELEMREV_V4SF
:
15085 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v4sf
15086 : CODE_FOR_vsx_ld_elemrev_v4sf
);
15087 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15089 case VSX_BUILTIN_LD_ELEMREV_V4SI
:
15091 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v4si
15092 : CODE_FOR_vsx_ld_elemrev_v4si
);
15093 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15095 case VSX_BUILTIN_LD_ELEMREV_V8HI
:
15097 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v8hi
15098 : CODE_FOR_vsx_ld_elemrev_v8hi
);
15099 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15101 case VSX_BUILTIN_LD_ELEMREV_V16QI
:
15103 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_load_v16qi
15104 : CODE_FOR_vsx_ld_elemrev_v16qi
);
15105 return altivec_expand_lv_builtin (code
, exp
, target
, false);
15110 /* Fall through. */
15113 *expandedp
= false;
/* Check whether a builtin function is supported in this target
   configuration.  */
bool
rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
{
  HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    return false;
  else
    return true;
}
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
	   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
    {
      if (!TARGET_HARD_FLOAT)
	error ("builtin function %qs requires the %qs option", name,
	       "-mhard-float");
      else
	error ("builtin function %qs requires the %qs option", name,
	       TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
    }
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
    error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
	   name);
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
	   == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
    error ("builtin function %qs requires the %qs (or newer), and "
	   "%qs or %qs options",
	   name, "-mcpu=power7", "-m64", "-mpowerpc64");
  else
    error ("builtin function %qs is not supported with the current options",
	   name);
}
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
		     int n_args ATTRIBUTE_UNUSED,
		     tree *args ATTRIBUTE_UNUSED,
		     bool ignore ATTRIBUTE_UNUSED)
{
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#else
  return NULL_TREE;
#endif
}
/* Helper function to sort out which built-ins may be valid without having
   a LHS.  */
static bool
rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
{
  switch (fn_code)
    {
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
    case VSX_BUILTIN_STXVW4X_V16QI:
    case VSX_BUILTIN_STXVW4X_V8HI:
    case VSX_BUILTIN_STXVW4X_V4SF:
    case VSX_BUILTIN_STXVW4X_V4SI:
    case VSX_BUILTIN_STXVD2X_V2DF:
    case VSX_BUILTIN_STXVD2X_V2DI:
      return true;
    default:
      return false;
    }
}
/* Helper function to handle the gimple folding of a vector compare
   operation.  This sets up true/false vectors, and uses the
   VEC_COND_EXPR operation.
   CODE indicates which comparison is to be made. (EQ, GT, ...).
   TYPE indicates the type of the result.  */
static tree
fold_build_vec_cmp (tree_code code, tree type,
		    tree arg0, tree arg1)
{
  tree cmp_type = build_same_sized_truth_vector_type (type);
  tree zero_vec = build_zero_cst (type);
  tree minus_one_vec = build_minus_one_cst (type);
  tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
  return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
}
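/* Shape of the folded result (illustration): for a comparison CODE on vector
   operands, the tree built above is

     VEC_COND_EXPR <arg0 CODE arg1, { -1, ... }, { 0, ... }>

   i.e. lanes where the comparison holds become all-ones and the remaining
   lanes become zero, matching the AltiVec predicate-result convention.  */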
/* Helper function to handle the in-between steps for the
   vector compare built-ins.  */
static void
fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
  gimple *g = gimple_build_assign (lhs, cmp);
  gimple_set_location (g, gimple_location (stmt));
  gsi_replace (gsi, g, true);
}
/* Helper function to map V2DF and V4SF types to their
   integral equivalents (V2DI and V4SI).  */
tree
map_to_integral_tree_type (tree input_tree_type)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
    return input_tree_type;
  else
    {
      if (types_compatible_p (TREE_TYPE (input_tree_type),
			      TREE_TYPE (V2DF_type_node)))
	return V2DI_type_node;
      else if (types_compatible_p (TREE_TYPE (input_tree_type),
				   TREE_TYPE (V4SF_type_node)))
	return V4SI_type_node;
      else
	gcc_unreachable ();
    }
}
/* Helper function to handle the vector merge[hl] built-ins.  The
   implementation difference between h and l versions for this code are in
   the values used when building of the permute vector for high word versus
   low word merge.  The variance is keyed off the use_high parameter.  */
static void
fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
  int midpoint = n_elts / 2;
  int offset = 0;

  if (use_high == 1)
    offset = midpoint;

  /* The permute_type will match the lhs for integral types.  For double and
     float types, the permute type needs to map to the V2 or V4 type that
     matches size.  */
  tree permute_type;
  permute_type = map_to_integral_tree_type (lhs_type);
  tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);

  for (int i = 0; i < midpoint; i++)
    {
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     offset + i));
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     offset + n_elts + i));
    }

  tree permute = elts.build ();

  gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
  gimple_set_location (g, gimple_location (stmt));
  gsi_replace (gsi, g, true);
}
/* Helper function to handle the vector merge[eo] built-ins.  */
static void
fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);

  /* The permute_type will match the lhs for integral types.  For double and
     float types, the permute type needs to map to the V2 or V4 type that
     matches size.  */
  tree permute_type;
  permute_type = map_to_integral_tree_type (lhs_type);

  tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);

  /* Build the permute vector.  */
  for (int i = 0; i < n_elts / 2; i++)
    {
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     2*i + use_odd));
      elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
				     2*i + use_odd + n_elts));
    }

  tree permute = elts.build ();

  gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
  gimple_set_location (g, gimple_location (stmt));
  gsi_replace (gsi, g, true);
}
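/* Worked example (illustration): for 4-element vectors, the selector built
   above is { 0, 4, 2, 6 } when use_odd is 0 (vec_mergee) and { 1, 5, 3, 7 }
   when use_odd is 1 (vec_mergeo), so the VEC_PERM_EXPR interleaves the even
   (or odd) numbered elements of the two input vectors.  */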
/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
   a constant, use rs6000_fold_builtin.)  */

bool
rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
  enum rs6000_builtins fn_code
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, lhs, temp;
  enum tree_code bcode;
  gimple *g;

  size_t uns_fncode = (size_t) fn_code;
  enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
  const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
  const char *fn_name2 = (icode != CODE_FOR_nothing)
			  ? get_insn_name ((int) icode)
			  : "nothing";

  if (TARGET_DEBUG_BUILTIN)
      fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
	       fn_code, fn_name1, fn_name2);

  if (!rs6000_fold_gimple)
    return false;

  /* Prevent gimple folding for code that does not have a LHS, unless it is
     allowed per the rs6000_builtin_valid_without_lhs helper function.  */
  if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
    return false;

  /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it.  */
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
  bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
  if (!func_valid_p)
    return false;

  switch (fn_code)
    {
15403 /* Flavors of vec_add. We deliberately don't expand
15404 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15405 TImode, resulting in much poorer code generation. */
15406 case ALTIVEC_BUILTIN_VADDUBM
:
15407 case ALTIVEC_BUILTIN_VADDUHM
:
15408 case ALTIVEC_BUILTIN_VADDUWM
:
15409 case P8V_BUILTIN_VADDUDM
:
15410 case ALTIVEC_BUILTIN_VADDFP
:
15411 case VSX_BUILTIN_XVADDDP
:
15412 arg0
= gimple_call_arg (stmt
, 0);
15413 arg1
= gimple_call_arg (stmt
, 1);
15414 lhs
= gimple_call_lhs (stmt
);
15415 g
= gimple_build_assign (lhs
, PLUS_EXPR
, arg0
, arg1
);
15416 gimple_set_location (g
, gimple_location (stmt
));
15417 gsi_replace (gsi
, g
, true);
15419 /* Flavors of vec_sub. We deliberately don't expand
15420 P8V_BUILTIN_VSUBUQM. */
15421 case ALTIVEC_BUILTIN_VSUBUBM
:
15422 case ALTIVEC_BUILTIN_VSUBUHM
:
15423 case ALTIVEC_BUILTIN_VSUBUWM
:
15424 case P8V_BUILTIN_VSUBUDM
:
15425 case ALTIVEC_BUILTIN_VSUBFP
:
15426 case VSX_BUILTIN_XVSUBDP
:
15427 arg0
= gimple_call_arg (stmt
, 0);
15428 arg1
= gimple_call_arg (stmt
, 1);
15429 lhs
= gimple_call_lhs (stmt
);
15430 g
= gimple_build_assign (lhs
, MINUS_EXPR
, arg0
, arg1
);
15431 gimple_set_location (g
, gimple_location (stmt
));
15432 gsi_replace (gsi
, g
, true);
15434 case VSX_BUILTIN_XVMULSP
:
15435 case VSX_BUILTIN_XVMULDP
:
15436 arg0
= gimple_call_arg (stmt
, 0);
15437 arg1
= gimple_call_arg (stmt
, 1);
15438 lhs
= gimple_call_lhs (stmt
);
15439 g
= gimple_build_assign (lhs
, MULT_EXPR
, arg0
, arg1
);
15440 gimple_set_location (g
, gimple_location (stmt
));
15441 gsi_replace (gsi
, g
, true);
15443 /* Even element flavors of vec_mul (signed). */
15444 case ALTIVEC_BUILTIN_VMULESB
:
15445 case ALTIVEC_BUILTIN_VMULESH
:
15446 case P8V_BUILTIN_VMULESW
:
15447 /* Even element flavors of vec_mul (unsigned). */
15448 case ALTIVEC_BUILTIN_VMULEUB
:
15449 case ALTIVEC_BUILTIN_VMULEUH
:
15450 case P8V_BUILTIN_VMULEUW
:
15451 arg0
= gimple_call_arg (stmt
, 0);
15452 arg1
= gimple_call_arg (stmt
, 1);
15453 lhs
= gimple_call_lhs (stmt
);
15454 g
= gimple_build_assign (lhs
, VEC_WIDEN_MULT_EVEN_EXPR
, arg0
, arg1
);
15455 gimple_set_location (g
, gimple_location (stmt
));
15456 gsi_replace (gsi
, g
, true);
15458 /* Odd element flavors of vec_mul (signed). */
15459 case ALTIVEC_BUILTIN_VMULOSB
:
15460 case ALTIVEC_BUILTIN_VMULOSH
:
15461 case P8V_BUILTIN_VMULOSW
:
15462 /* Odd element flavors of vec_mul (unsigned). */
15463 case ALTIVEC_BUILTIN_VMULOUB
:
15464 case ALTIVEC_BUILTIN_VMULOUH
:
15465 case P8V_BUILTIN_VMULOUW
:
15466 arg0
= gimple_call_arg (stmt
, 0);
15467 arg1
= gimple_call_arg (stmt
, 1);
15468 lhs
= gimple_call_lhs (stmt
);
15469 g
= gimple_build_assign (lhs
, VEC_WIDEN_MULT_ODD_EXPR
, arg0
, arg1
);
15470 gimple_set_location (g
, gimple_location (stmt
));
15471 gsi_replace (gsi
, g
, true);
15473 /* Flavors of vec_div (Integer). */
15474 case VSX_BUILTIN_DIV_V2DI
:
15475 case VSX_BUILTIN_UDIV_V2DI
:
15476 arg0
= gimple_call_arg (stmt
, 0);
15477 arg1
= gimple_call_arg (stmt
, 1);
15478 lhs
= gimple_call_lhs (stmt
);
15479 g
= gimple_build_assign (lhs
, TRUNC_DIV_EXPR
, arg0
, arg1
);
15480 gimple_set_location (g
, gimple_location (stmt
));
15481 gsi_replace (gsi
, g
, true);
15483 /* Flavors of vec_div (Float). */
15484 case VSX_BUILTIN_XVDIVSP
:
15485 case VSX_BUILTIN_XVDIVDP
:
15486 arg0
= gimple_call_arg (stmt
, 0);
15487 arg1
= gimple_call_arg (stmt
, 1);
15488 lhs
= gimple_call_lhs (stmt
);
15489 g
= gimple_build_assign (lhs
, RDIV_EXPR
, arg0
, arg1
);
15490 gimple_set_location (g
, gimple_location (stmt
));
15491 gsi_replace (gsi
, g
, true);
15493 /* Flavors of vec_and. */
15494 case ALTIVEC_BUILTIN_VAND
:
15495 arg0
= gimple_call_arg (stmt
, 0);
15496 arg1
= gimple_call_arg (stmt
, 1);
15497 lhs
= gimple_call_lhs (stmt
);
15498 g
= gimple_build_assign (lhs
, BIT_AND_EXPR
, arg0
, arg1
);
15499 gimple_set_location (g
, gimple_location (stmt
));
15500 gsi_replace (gsi
, g
, true);
15502 /* Flavors of vec_andc. */
15503 case ALTIVEC_BUILTIN_VANDC
:
15504 arg0
= gimple_call_arg (stmt
, 0);
15505 arg1
= gimple_call_arg (stmt
, 1);
15506 lhs
= gimple_call_lhs (stmt
);
15507 temp
= create_tmp_reg_or_ssa_name (TREE_TYPE (arg1
));
15508 g
= gimple_build_assign (temp
, BIT_NOT_EXPR
, arg1
);
15509 gimple_set_location (g
, gimple_location (stmt
));
15510 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
15511 g
= gimple_build_assign (lhs
, BIT_AND_EXPR
, arg0
, temp
);
15512 gimple_set_location (g
, gimple_location (stmt
));
15513 gsi_replace (gsi
, g
, true);
15515 /* Flavors of vec_nand. */
15516 case P8V_BUILTIN_VEC_NAND
:
15517 case P8V_BUILTIN_NAND_V16QI
:
15518 case P8V_BUILTIN_NAND_V8HI
:
15519 case P8V_BUILTIN_NAND_V4SI
:
15520 case P8V_BUILTIN_NAND_V4SF
:
15521 case P8V_BUILTIN_NAND_V2DF
:
15522 case P8V_BUILTIN_NAND_V2DI
:
15523 arg0
= gimple_call_arg (stmt
, 0);
15524 arg1
= gimple_call_arg (stmt
, 1);
15525 lhs
= gimple_call_lhs (stmt
);
15526 temp
= create_tmp_reg_or_ssa_name (TREE_TYPE (arg1
));
15527 g
= gimple_build_assign (temp
, BIT_AND_EXPR
, arg0
, arg1
);
15528 gimple_set_location (g
, gimple_location (stmt
));
15529 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
15530 g
= gimple_build_assign (lhs
, BIT_NOT_EXPR
, temp
);
15531 gimple_set_location (g
, gimple_location (stmt
));
15532 gsi_replace (gsi
, g
, true);
15534 /* Flavors of vec_or. */
15535 case ALTIVEC_BUILTIN_VOR
:
15536 arg0
= gimple_call_arg (stmt
, 0);
15537 arg1
= gimple_call_arg (stmt
, 1);
15538 lhs
= gimple_call_lhs (stmt
);
15539 g
= gimple_build_assign (lhs
, BIT_IOR_EXPR
, arg0
, arg1
);
15540 gimple_set_location (g
, gimple_location (stmt
));
15541 gsi_replace (gsi
, g
, true);
15543 /* flavors of vec_orc. */
15544 case P8V_BUILTIN_ORC_V16QI
:
15545 case P8V_BUILTIN_ORC_V8HI
:
15546 case P8V_BUILTIN_ORC_V4SI
:
15547 case P8V_BUILTIN_ORC_V4SF
:
15548 case P8V_BUILTIN_ORC_V2DF
:
15549 case P8V_BUILTIN_ORC_V2DI
:
15550 arg0
= gimple_call_arg (stmt
, 0);
15551 arg1
= gimple_call_arg (stmt
, 1);
15552 lhs
= gimple_call_lhs (stmt
);
15553 temp
= create_tmp_reg_or_ssa_name (TREE_TYPE (arg1
));
15554 g
= gimple_build_assign (temp
, BIT_NOT_EXPR
, arg1
);
15555 gimple_set_location (g
, gimple_location (stmt
));
15556 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
15557 g
= gimple_build_assign (lhs
, BIT_IOR_EXPR
, arg0
, temp
);
15558 gimple_set_location (g
, gimple_location (stmt
));
15559 gsi_replace (gsi
, g
, true);
15561 /* Flavors of vec_xor. */
15562 case ALTIVEC_BUILTIN_VXOR
:
15563 arg0
= gimple_call_arg (stmt
, 0);
15564 arg1
= gimple_call_arg (stmt
, 1);
15565 lhs
= gimple_call_lhs (stmt
);
15566 g
= gimple_build_assign (lhs
, BIT_XOR_EXPR
, arg0
, arg1
);
15567 gimple_set_location (g
, gimple_location (stmt
));
15568 gsi_replace (gsi
, g
, true);
15570 /* Flavors of vec_nor. */
15571 case ALTIVEC_BUILTIN_VNOR
:
15572 arg0
= gimple_call_arg (stmt
, 0);
15573 arg1
= gimple_call_arg (stmt
, 1);
15574 lhs
= gimple_call_lhs (stmt
);
15575 temp
= create_tmp_reg_or_ssa_name (TREE_TYPE (arg1
));
15576 g
= gimple_build_assign (temp
, BIT_IOR_EXPR
, arg0
, arg1
);
15577 gimple_set_location (g
, gimple_location (stmt
));
15578 gsi_insert_before (gsi
, g
, GSI_SAME_STMT
);
15579 g
= gimple_build_assign (lhs
, BIT_NOT_EXPR
, temp
);
15580 gimple_set_location (g
, gimple_location (stmt
));
15581 gsi_replace (gsi
, g
, true);
    /* Flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      arg0 = gimple_call_arg (stmt, 0);
      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
          && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
        return false;
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, ABS_EXPR, arg0);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
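    /* Note (explanatory, not from the original sources): the
       TYPE_OVERFLOW_WRAPS guard above appears to exist because ABS_EXPR on a
       signed integer vector whose overflow is undefined would itself be
       undefined for the most-negative element value, while the vec_abs
       builtin is well defined for that input; in that situation the fold is
       skipped and the builtin is expanded later instead.  */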
    /* Flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vector shift right algebraic.
       vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vector shift left.
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      {
        location_t loc;
        gimple_seq stmts = NULL;
        arg0 = gimple_call_arg (stmt, 0);
        tree arg0_type = TREE_TYPE (arg0);
        if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
            && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
          return false;
        arg1 = gimple_call_arg (stmt, 1);
        tree arg1_type = TREE_TYPE (arg1);
        tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
        tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
        loc = gimple_location (stmt);
        lhs = gimple_call_lhs (stmt);
        /* Force arg1 into the range valid matching the arg0 type.  */
        /* Build a vector consisting of the max valid bit-size values.  */
        int n_elts = VECTOR_CST_NELTS (arg1);
        int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
                                * BITS_PER_UNIT;
        tree element_size = build_int_cst (unsigned_element_type,
                                           tree_size_in_bits / n_elts);
        tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
        for (int i = 0; i < n_elts; i++)
          elts.safe_push (element_size);
        tree modulo_tree = elts.build ();
        /* Modulo the provided shift value against that vector.  */
        tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
                                           unsigned_arg1_type, arg1);
        tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
                                      unsigned_arg1_type, unsigned_arg1,
                                      modulo_tree);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        /* And finally, do the shift.  */
        g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
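      /* Worked example of the shift-count handling above (illustrative, not
         from the original sources): for a V4SI operand, arg1_type is 16
         bytes, so tree_size_in_bits is 128 and each element_size is
         128 / 4 = 32.  A lane value of 35 in the shift-count vector is
         therefore reduced to 35 % 32 = 3 before the LSHIFT_EXPR is built,
         which matches the vsl{b,h,w,d} hardware behavior of using only the
         low bits of each per-element shift count.  */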
    /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple_seq stmts = NULL;
        /* Convert arg0 to unsigned.  */
        tree arg0_unsigned
          = gimple_build (&stmts, VIEW_CONVERT_EXPR,
                          unsigned_type_for (TREE_TYPE (arg0)), arg0);
        tree res
          = gimple_build (&stmts, RSHIFT_EXPR,
                          TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
        /* Convert result back to the lhs type.  */
        res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        update_call_from_tree (gsi, res);
        return true;
      }
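      /* Note (explanatory, not from the original sources): the
         VIEW_CONVERT_EXPR dance above is needed because vsr{b,h,w,d} is a
         logical (zero-filling) shift regardless of the operand's signedness,
         whereas RSHIFT_EXPR on a signed type would be arithmetic; performing
         the shift in the unsigned type and converting the result back gives
         the intended semantics.  */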
    /* Vector loads.  */
    case ALTIVEC_BUILTIN_LVX_V16QI:
    case ALTIVEC_BUILTIN_LVX_V8HI:
    case ALTIVEC_BUILTIN_LVX_V4SI:
    case ALTIVEC_BUILTIN_LVX_V4SF:
    case ALTIVEC_BUILTIN_LVX_V2DI:
    case ALTIVEC_BUILTIN_LVX_V2DF:
    case ALTIVEC_BUILTIN_LVX_V1TI:
      {
        arg0 = gimple_call_arg (stmt, 0);  // offset
        arg1 = gimple_call_arg (stmt, 1);  // address
        lhs = gimple_call_lhs (stmt);
        location_t loc = gimple_location (stmt);
        /* Since arg1 may be cast to a different type, just use ptr_type_node
           here instead of trying to enforce TBAA on pointer types.  */
        tree arg1_type = ptr_type_node;
        tree lhs_type = TREE_TYPE (lhs);
        /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
           the tree using the value from arg0.  The resulting type will match
           the type of arg1.  */
        gimple_seq stmts = NULL;
        tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
        tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
                                       arg1_type, arg1, temp_offset);
        /* Mask off any lower bits from the address.  */
        tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
                                          arg1_type, temp_addr,
                                          build_int_cst (arg1_type, -16));
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        /* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
           take an offset, but since we've already incorporated the offset
           above, here we just pass in a zero.  */
        g = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
                                              build_int_cst (arg1_type, 0)));
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
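      /* Roughly, the fold above rewrites __builtin_altivec_lvx_v4si (off, p)
         as (illustrative pseudo-GIMPLE, not emitted verbatim):

             tmp_addr     = p p+ (sizetype) off;
             aligned_addr = tmp_addr & -16;
             lhs          = MEM[aligned_addr];

         The low four address bits are cleared because the lvx instruction
         itself ignores them.  */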
    /* Vector stores.  */
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      {
        arg0 = gimple_call_arg (stmt, 0); /* Value to be stored.  */
        arg1 = gimple_call_arg (stmt, 1); /* Offset.  */
        tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address.  */
        location_t loc = gimple_location (stmt);
        tree arg0_type = TREE_TYPE (arg0);
        /* Use ptr_type_node (no TBAA) for the arg2_type.
           FIXME: (Richard)  "A proper fix would be to transition this type as
           seen from the frontend to GIMPLE, for example in a similar way we
           do for MEM_REFs by piggy-backing that on an extra argument, a
           constant zero pointer of the alias pointer type to use (which would
           also serve as a type indicator of the store itself).  I'd use a
           target specific internal function for this (not sure if we can have
           those target specific, but I guess if it's folded away then that's
           fine) and get away with the overload set."  */
        tree arg2_type = ptr_type_node;
        /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
           the tree using the value from arg1.  The resulting type will match
           the type of arg2.  */
        gimple_seq stmts = NULL;
        tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
        tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
                                       arg2_type, arg2, temp_offset);
        /* Mask off any lower bits from the address.  */
        tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
                                          arg2_type, temp_addr,
                                          build_int_cst (arg2_type, -16));
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        /* The desired gimple result should be similar to:
           MEM[(__vector floatD.1407 *)_1] = vf1D.2697;  */
        g = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
                                         build_int_cst (arg2_type, 0)), arg0);
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Unaligned vector loads.  */
    case VSX_BUILTIN_LXVW4X_V16QI:
    case VSX_BUILTIN_LXVW4X_V8HI:
    case VSX_BUILTIN_LXVW4X_V4SF:
    case VSX_BUILTIN_LXVW4X_V4SI:
    case VSX_BUILTIN_LXVD2X_V2DF:
    case VSX_BUILTIN_LXVD2X_V2DI:
      {
        arg0 = gimple_call_arg (stmt, 0);  // offset
        arg1 = gimple_call_arg (stmt, 1);  // address
        lhs = gimple_call_lhs (stmt);
        location_t loc = gimple_location (stmt);
        /* Since arg1 may be cast to a different type, just use ptr_type_node
           here instead of trying to enforce TBAA on pointer types.  */
        tree arg1_type = ptr_type_node;
        tree lhs_type = TREE_TYPE (lhs);
        /* In GIMPLE the type of the MEM_REF specifies the alignment.  The
           required alignment (power) is 4 bytes regardless of data type.  */
        tree align_ltype = build_aligned_type (lhs_type, 4);
        /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
           the tree using the value from arg0.  The resulting type will match
           the type of arg1.  */
        gimple_seq stmts = NULL;
        tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
        tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
                                       arg1_type, arg1, temp_offset);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        /* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
           take an offset, but since we've already incorporated the offset
           above, here we just pass in a zero.  */
        g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
                                              build_int_cst (arg1_type, 0)));
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Unaligned vector stores.  */
    case VSX_BUILTIN_STXVW4X_V16QI:
    case VSX_BUILTIN_STXVW4X_V8HI:
    case VSX_BUILTIN_STXVW4X_V4SF:
    case VSX_BUILTIN_STXVW4X_V4SI:
    case VSX_BUILTIN_STXVD2X_V2DF:
    case VSX_BUILTIN_STXVD2X_V2DI:
      {
        arg0 = gimple_call_arg (stmt, 0); /* Value to be stored.  */
        arg1 = gimple_call_arg (stmt, 1); /* Offset.  */
        tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address.  */
        location_t loc = gimple_location (stmt);
        tree arg0_type = TREE_TYPE (arg0);
        /* Use ptr_type_node (no TBAA) for the arg2_type.  */
        tree arg2_type = ptr_type_node;
        /* In GIMPLE the type of the MEM_REF specifies the alignment.  The
           required alignment (power) is 4 bytes regardless of data type.  */
        tree align_stype = build_aligned_type (arg0_type, 4);
        /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
           the tree using the value from arg1.  */
        gimple_seq stmts = NULL;
        tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
        tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
                                       arg2_type, arg2, temp_offset);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
                                         build_int_cst (arg2_type, 0)), arg0);
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Vector fused multiply-add (fma).  */
    case ALTIVEC_BUILTIN_VMADDFP:
    case VSX_BUILTIN_XVMADDDP:
    case ALTIVEC_BUILTIN_VMLADDUHM:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        tree arg2 = gimple_call_arg (stmt, 2);
        lhs = gimple_call_lhs (stmt);
        gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
        gimple_call_set_lhs (g, lhs);
        gimple_call_set_nothrow (g, true);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* Vector compares; EQ, NE, GE, GT, LE.  */
    case ALTIVEC_BUILTIN_VCMPEQUB:
    case ALTIVEC_BUILTIN_VCMPEQUH:
    case ALTIVEC_BUILTIN_VCMPEQUW:
    case P8V_BUILTIN_VCMPEQUD:
      fold_compare_helper (gsi, EQ_EXPR, stmt);
      return true;

    case P9V_BUILTIN_CMPNEB:
    case P9V_BUILTIN_CMPNEH:
    case P9V_BUILTIN_CMPNEW:
      fold_compare_helper (gsi, NE_EXPR, stmt);
      return true;

    case VSX_BUILTIN_CMPGE_16QI:
    case VSX_BUILTIN_CMPGE_U16QI:
    case VSX_BUILTIN_CMPGE_8HI:
    case VSX_BUILTIN_CMPGE_U8HI:
    case VSX_BUILTIN_CMPGE_4SI:
    case VSX_BUILTIN_CMPGE_U4SI:
    case VSX_BUILTIN_CMPGE_2DI:
    case VSX_BUILTIN_CMPGE_U2DI:
      fold_compare_helper (gsi, GE_EXPR, stmt);
      return true;

    case ALTIVEC_BUILTIN_VCMPGTSB:
    case ALTIVEC_BUILTIN_VCMPGTUB:
    case ALTIVEC_BUILTIN_VCMPGTSH:
    case ALTIVEC_BUILTIN_VCMPGTUH:
    case ALTIVEC_BUILTIN_VCMPGTSW:
    case ALTIVEC_BUILTIN_VCMPGTUW:
    case P8V_BUILTIN_VCMPGTUD:
    case P8V_BUILTIN_VCMPGTSD:
      fold_compare_helper (gsi, GT_EXPR, stmt);
      return true;

    case VSX_BUILTIN_CMPLE_16QI:
    case VSX_BUILTIN_CMPLE_U16QI:
    case VSX_BUILTIN_CMPLE_8HI:
    case VSX_BUILTIN_CMPLE_U8HI:
    case VSX_BUILTIN_CMPLE_4SI:
    case VSX_BUILTIN_CMPLE_U4SI:
    case VSX_BUILTIN_CMPLE_2DI:
    case VSX_BUILTIN_CMPLE_U2DI:
      fold_compare_helper (gsi, LE_EXPR, stmt);
      return true;
    /* Flavors of vec_splat_[us]{8,16,32}.  */
    case ALTIVEC_BUILTIN_VSPLTISB:
    case ALTIVEC_BUILTIN_VSPLTISH:
    case ALTIVEC_BUILTIN_VSPLTISW:
      {
        int size;
        if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
          size = 8;
        else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
          size = 16;
        else
          size = 32;

        arg0 = gimple_call_arg (stmt, 0);
        lhs = gimple_call_lhs (stmt);

        /* Only fold the vec_splat_*() if the lower bits of arg 0 are a
           5-bit signed constant in range -16 to +15.  */
        if (TREE_CODE (arg0) != INTEGER_CST
            || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
                          -16, 15))
          return false;

        gimple_seq stmts = NULL;
        location_t loc = gimple_location (stmt);
        tree splat_value = gimple_convert (&stmts, loc,
                                           TREE_TYPE (TREE_TYPE (lhs)), arg0);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
        g = gimple_build_assign (lhs, splat_tree);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
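      /* Illustrative example (not from the original sources): with this
         fold, vec_splat_s32 (12) becomes the vector constant
         { 12, 12, 12, 12 }, while an argument such as 20 fails the
         -16..15 range check above, so the call is left alone and handled
         by the normal expansion path instead.  */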
    /* Flavors of vec_splat.  */
    /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...};  */
    case ALTIVEC_BUILTIN_VSPLTB:
    case ALTIVEC_BUILTIN_VSPLTH:
    case ALTIVEC_BUILTIN_VSPLTW:
    case VSX_BUILTIN_XXSPLTD_V2DI:
    case VSX_BUILTIN_XXSPLTD_V2DF:
      {
        arg0 = gimple_call_arg (stmt, 0); /* input vector.  */
        arg1 = gimple_call_arg (stmt, 1); /* index into arg0.  */
        /* Only fold the vec_splat_*() if arg1 is both a constant value and
           a valid index into the arg0 vector.  */
        unsigned int n_elts = VECTOR_CST_NELTS (arg0);
        if (TREE_CODE (arg1) != INTEGER_CST
            || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
          return false;
        lhs = gimple_call_lhs (stmt);
        tree lhs_type = TREE_TYPE (lhs);
        tree arg0_type = TREE_TYPE (arg0);
        tree splat;
        if (TREE_CODE (arg0) == VECTOR_CST)
          splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
        else
          {
            /* Determine (in bits) the length and start location of the
               splat value for a call to the tree_vec_extract helper.  */
            int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
                                  * BITS_PER_UNIT / n_elts;
            int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
            tree len = build_int_cst (bitsizetype, splat_elem_size);
            tree start = build_int_cst (bitsizetype, splat_start_bit);
            splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
                                      len, start);
          }
        /* And finally, build the new vector.  */
        tree splat_tree = build_vector_from_val (lhs_type, splat);
        g = gimple_build_assign (lhs, splat_tree);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
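      /* Worked example of the bit arithmetic above (illustrative): for
         vec_splat on a V4SI vector, size_in_bytes (arg0_type) is 16, so
         splat_elem_size is 16 * 8 / 4 = 32 bits; with arg1 == 3 the
         extracted field starts at bit 96 and is 32 bits wide, and the
         result vector is { b[3], b[3], b[3], b[3] }.  */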
    /* vec_mergel (integrals).  */
    case ALTIVEC_BUILTIN_VMRGLH:
    case ALTIVEC_BUILTIN_VMRGLW:
    case VSX_BUILTIN_XXMRGLW_4SI:
    case ALTIVEC_BUILTIN_VMRGLB:
    case VSX_BUILTIN_VEC_MERGEL_V2DI:
    case VSX_BUILTIN_XXMRGLW_4SF:
    case VSX_BUILTIN_VEC_MERGEL_V2DF:
      fold_mergehl_helper (gsi, stmt, 1);
      return true;

    /* vec_mergeh (integrals).  */
    case ALTIVEC_BUILTIN_VMRGHH:
    case ALTIVEC_BUILTIN_VMRGHW:
    case VSX_BUILTIN_XXMRGHW_4SI:
    case ALTIVEC_BUILTIN_VMRGHB:
    case VSX_BUILTIN_VEC_MERGEH_V2DI:
    case VSX_BUILTIN_XXMRGHW_4SF:
    case VSX_BUILTIN_VEC_MERGEH_V2DF:
      fold_mergehl_helper (gsi, stmt, 0);
      return true;

    /* Flavors of vec_mergee.  */
    case P8V_BUILTIN_VMRGEW_V4SI:
    case P8V_BUILTIN_VMRGEW_V2DI:
    case P8V_BUILTIN_VMRGEW_V4SF:
    case P8V_BUILTIN_VMRGEW_V2DF:
      fold_mergeeo_helper (gsi, stmt, 0);
      return true;

    /* Flavors of vec_mergeo.  */
    case P8V_BUILTIN_VMRGOW_V4SI:
    case P8V_BUILTIN_VMRGOW_V2DI:
    case P8V_BUILTIN_VMRGOW_V4SF:
    case P8V_BUILTIN_VMRGOW_V2DF:
      fold_mergeeo_helper (gsi, stmt, 1);
      return true;
    /* d = vec_pack (a, b).  */
    case P8V_BUILTIN_VPKUDUM:
    case ALTIVEC_BUILTIN_VPKUHUM:
    case ALTIVEC_BUILTIN_VPKUWUM:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        lhs = gimple_call_lhs (stmt);
        gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
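      /* Illustrative example: VEC_PACK_TRUNC_EXPR of two V4SI operands
         yields a single V8HI result in which each 32-bit element has been
         truncated to its low 16 bits, which is exactly the modulo narrowing
         that vec_pack / vpkuwum performs, so no extra conversion statements
         are needed here.  */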
    /* d = vec_unpackh (a).  */
    /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
       in this code is sensitive to endian-ness, and needs to be inverted to
       handle both LE and BE targets.  */
    case ALTIVEC_BUILTIN_VUPKHSB:
    case ALTIVEC_BUILTIN_VUPKHSH:
    case P8V_BUILTIN_VUPKHSW:
      {
        arg0 = gimple_call_arg (stmt, 0);
        lhs = gimple_call_lhs (stmt);
        if (BYTES_BIG_ENDIAN)
          g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
        else
          g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* d = vec_unpackl (a).  */
    case ALTIVEC_BUILTIN_VUPKLSB:
    case ALTIVEC_BUILTIN_VUPKLSH:
    case P8V_BUILTIN_VUPKLSW:
      {
        arg0 = gimple_call_arg (stmt, 0);
        lhs = gimple_call_lhs (stmt);
        if (BYTES_BIG_ENDIAN)
          g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
        else
          g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
        gimple_set_location (g, gimple_location (stmt));
        gsi_replace (gsi, g, true);
        return true;
      }
    /* There is no gimple type corresponding with pixel, so just return.  */
    case ALTIVEC_BUILTIN_VUPKHPX:
    case ALTIVEC_BUILTIN_VUPKLPX:
      return false;
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DF:
      {
        arg0 = gimple_call_arg (stmt, 0);
        arg1 = gimple_call_arg (stmt, 1);
        tree permute = gimple_call_arg (stmt, 2);
        lhs = gimple_call_lhs (stmt);
        location_t loc = gimple_location (stmt);
        gimple_seq stmts = NULL;
        // Convert arg0 and arg1 to match the type of the permute
        // for the VEC_PERM_EXPR operation.
        tree permute_type = (TREE_TYPE (permute));
        tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
        tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
        tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
                                       permute_type, arg0_ptype, arg1_ptype,
                                       permute);
        // Convert the result back to the desired lhs type upon completion.
        tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
        gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
        g = gimple_build_assign (lhs, temp);
        gimple_set_location (g, loc);
        gsi_replace (gsi, g, true);
        return true;
      }
    default:
      if (TARGET_DEBUG_BUILTIN)
        fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
                 fn_code, fn_name1, fn_name2);
      break;
    }

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                       machine_mode mode ATTRIBUTE_UNUSED,
                       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t) fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
  enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
  /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
     floating point type, depending on whether long double is the IBM extended
     double (KFmode) or long double is IEEE 128-bit (TFmode).  It is simpler if
     we only define one variant of the built-in function, and switch the code
     when defining it, rather than defining two built-ins and using the
     overload table in rs6000-c.c to switch between the two.  If we don't have
     the proper assembler, don't do this switch because CODE_FOR_*kf* and
     CODE_FOR_*tf* will be CODE_FOR_nothing.  */
  if (FLOAT128_IEEE_P (TFmode))
    switch (icode)
      {
      default:
        break;

      case CODE_FOR_sqrtkf2_odd:      icode = CODE_FOR_sqrttf2_odd;      break;
      case CODE_FOR_trunckfdf2_odd:   icode = CODE_FOR_trunctfdf2_odd;   break;
      case CODE_FOR_addkf3_odd:       icode = CODE_FOR_addtf3_odd;       break;
      case CODE_FOR_subkf3_odd:       icode = CODE_FOR_subtf3_odd;       break;
      case CODE_FOR_mulkf3_odd:       icode = CODE_FOR_multf3_odd;       break;
      case CODE_FOR_divkf3_odd:       icode = CODE_FOR_divtf3_odd;       break;
      case CODE_FOR_fmakf4_odd:       icode = CODE_FOR_fmatf4_odd;       break;
      case CODE_FOR_xsxexpqp_kf:      icode = CODE_FOR_xsxexpqp_tf;      break;
      case CODE_FOR_xsxsigqp_kf:      icode = CODE_FOR_xsxsigqp_tf;      break;
      case CODE_FOR_xststdcnegqp_kf:  icode = CODE_FOR_xststdcnegqp_tf;  break;
      case CODE_FOR_xsiexpqp_kf:      icode = CODE_FOR_xsiexpqp_tf;      break;
      case CODE_FOR_xsiexpqpf_kf:     icode = CODE_FOR_xsiexpqpf_tf;     break;
      case CODE_FOR_xststdcqp_kf:     icode = CODE_FOR_xststdcqp_tf;     break;
      }
  if (TARGET_DEBUG_BUILTIN)
    {
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
                           ? get_insn_name ((int) icode)
                           : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
        {
        default:                   name3 = "unknown";   break;
        case RS6000_BTC_SPECIAL:   name3 = "special";   break;
        case RS6000_BTC_UNARY:     name3 = "unary";     break;
        case RS6000_BTC_BINARY:    name3 = "binary";    break;
        case RS6000_BTC_TERNARY:   name3 = "ternary";   break;
        case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
        case RS6000_BTC_ABS:       name3 = "abs";       break;
        case RS6000_BTC_DST:       name3 = "dst";       break;
        }

      fprintf (stderr,
               "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
               (name1) ? name1 : "---", fcode,
               (name2) ? name2 : "---", (int) icode,
               name3,
               func_valid_p ? "" : ", not valid");
    }
  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }
  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
                                           ? CODE_FOR_bpermd_di
                                           : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
                                           target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
                                            ? CODE_FOR_rs6000_mftb_di
                                            : CODE_FOR_rs6000_mftb_si),
                                           target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSB0:
      return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);

    case RS6000_BUILTIN_MTFSB1:
      return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);

    case RS6000_BUILTIN_SET_FPSCR_RN:
      return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
                                                 exp);

    case RS6000_BUILTIN_SET_FPSCR_DRN:
      return
        rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
                                             exp);

    case RS6000_BUILTIN_MFFSL:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);

    case MISC_BUILTIN_SPEC_BARRIER:
      {
        emit_insn (gen_speculation_barrier ());
        return NULL_RTX;
      }
    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
        int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
                      : (int) CODE_FOR_altivec_lvsl_direct);
        machine_mode tmode = insn_data[icode2].operand[0].mode;
        machine_mode mode = insn_data[icode2].operand[1].mode;
        tree arg;
        rtx op, addr, pat;

        gcc_assert (TARGET_ALTIVEC);

        arg = CALL_EXPR_ARG (exp, 0);
        gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
        op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
        addr = memory_address (mode, op);
        if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
          op = addr;
        else
          {
            /* For the load case need to negate the address.  */
            op = gen_reg_rtx (GET_MODE (addr));
            emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
          }
        op = gen_rtx_MEM (mode, op);

        if (target == 0
            || GET_MODE (target) != tmode
            || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
          target = gen_reg_rtx (tmode);

        pat = GEN_FCN (icode2) (target, op);
        if (!pat)
          return 0;
        emit_insn (pat);

        return target;
      }
    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
         constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
        {
          exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
                                 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
        }
      break;
      /* For the pack and unpack int128 routines, fix up the builtin so it
         uses the correct IBM128 type.  */
    case MISC_BUILTIN_PACK_IF:
      if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
        {
          icode = CODE_FOR_packtf;
          fcode = MISC_BUILTIN_PACK_TF;
          uns_fcode = (size_t) fcode;
        }
      break;

    case MISC_BUILTIN_UNPACK_IF:
      if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
        {
          icode = CODE_FOR_unpacktf;
          fcode = MISC_BUILTIN_UNPACK_TF;
          uns_fcode = (size_t) fcode;
        }
      break;

    default:
      break;
    }
  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
              || attr == RS6000_BTC_BINARY
              || attr == RS6000_BTC_TERNARY
              || attr == RS6000_BTC_SPECIAL);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (icode, target);

  gcc_unreachable ();
}
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s\n",
             (TARGET_ALTIVEC) ? ", altivec" : "",
             (TARGET_VSX)     ? ", vsx"     : "");

  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
                                       : "__vector long long",
                                       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
                                       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
                                       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
                                        intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
                                                 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
                                                unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
                                                unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
                                                ? "__vector unsigned long"
                                                : "__vector unsigned long long",
                                                unsigned_intDI_type_node, 2);

  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
                                                TYPE_QUAL_CONST));
  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
                                           intTI_type_node, 1);
      unsigned_V1TI_type_node
        = rs6000_vector_type ("__vector unsigned __int128",
                              unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;
  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM double-double
     format that uses a pair of doubles, depending on the switches and
     defaults.

     If we don't support either 128-bit IBM double double or IEEE 128-bit
     floating point, we need to make sure the type is non-zero or else the
     self-test fails during bootstrap.

     Always create __ibm128 as a separate type, even if the current long double
     format is IBM extended double.

     For IEEE 128-bit floating point, always create the type __ieee128.  If the
     user used -mfloat128, rs6000-c.c will create a define from __float128 to
     __ieee128.  */
  if (TARGET_FLOAT128_TYPE)
    {
      if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
        ibm128_float_type_node = long_double_type_node;
      else
        {
          ibm128_float_type_node = make_node (REAL_TYPE);
          TYPE_PRECISION (ibm128_float_type_node) = 128;
          SET_TYPE_MODE (ibm128_float_type_node, IFmode);
          layout_type (ibm128_float_type_node);
        }

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
                                              "__ibm128");

      if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
        ieee128_float_type_node = long_double_type_node;
      else
        ieee128_float_type_node = float128_type_node;

      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
                                              "__ieee128");
    }
  else
    ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     the tree type node to use for it.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
                                             bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
                                            bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
                                            bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
                                            ? "__vector __bool long"
                                            : "__vector __bool long long",
                                            bool_long_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
                                             pixel_type_node, 8);

  /* Create Altivec and VSX builtins on machines with at least the
     general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS)
    rs6000_common_init_builtins ();
  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
                                 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
                                      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
                                      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node, double_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
  def_builtin ("__builtin_ppc_speculation_barrier", ftype,
               MISC_BUILTIN_SPEC_BARRIER);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);

  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF
      && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
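/* Illustrative use of the CPU feature built-ins registered above.  This is
   user-level code shown only as an example (the helper function names are
   placeholders, not part of this file):

     if (__builtin_cpu_supports ("arch_2_07"))
       use_power8_path ();
     else
       use_generic_path ();
*/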
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins) code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
                                                TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, integer_type_node,
                                NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
                                integer_type_node, opaque_V4SI_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
                                integer_type_node, V4SI_type_node,
                                V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
                                integer_type_node, V2DI_type_node,
                                V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v1ti_ftype_long_pcvoid
    = build_function_type_list (V1TI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
                                opaque_V4SI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
                                V4SI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
                                V16QI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);

  tree void_ftype_v16qi_pvoid_long
    = build_function_type_list (void_type_node,
                                V16QI_type_node, pvoid_type_node,
                                long_integer_type_node, NULL_TREE);

  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
                                V8HI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
                                V4SF_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
                                V2DF_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v1ti_long_pvoid
    = build_function_type_list (void_type_node,
                                V1TI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
                                V2DI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
                                integer_type_node, V8HI_type_node,
                                V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
                                integer_type_node, V16QI_type_node,
                                V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
                                integer_type_node, V4SF_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
                                integer_type_node, V2DF_type_node,
                                V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
                                pcvoid_type_node, integer_type_node,
                                integer_type_node, NULL_TREE);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V2DF);
  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V2DI);
  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V4SF);
  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V4SI);
  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V8HI);
  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVXL_V16QI);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V1TI);
  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V2DF);
  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V2DI);
  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V4SF);
  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V4SI);
  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V8HI);
  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
               ALTIVEC_BUILTIN_LVX_V16QI);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V2DF);
  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V2DI);
  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V4SF);
  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V4SI);
  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V8HI);
  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
               ALTIVEC_BUILTIN_STVX_V16QI);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V2DF);
  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V2DI);
  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V4SF);
  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V4SI);
  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V8HI);
  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
               ALTIVEC_BUILTIN_STVXL_V16QI);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V16QI);

  def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V8HI);
  def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V16QI);
  def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V1TI);
  def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V8HI);
  def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V16QI);

  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_XL);
  def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_XL_BE);
  def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_XST);
  def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_XST_BE);
  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDE);
  def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDEC);
  def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_CMPNE);
  def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_MUL);
  def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBE);
  def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBEC);
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx",  v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx",  v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx",  v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx",  v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx",  void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx",  void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx",  void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx",  void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
		   P9V_BUILTIN_STXVL);
      def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
		   P9V_BUILTIN_XST_LEN_R);
    }
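  /* Illustrative note (assumed mapping, not spelled out in this file): the
     overloaded "__builtin_vec_*" names defined above are what <altivec.h>
     expands the generic AltiVec intrinsics to, e.g. something like

	vector int v = vec_ld (0, p);

     reaches the compiler as __builtin_vec_ld (0, p) and is later resolved by
     the front end's overload machinery to a type-specific builtin.  The
     exact mapping lives in altivec.h and the overload tables, not here.  */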
17124 /* Add the DST variants. */
17126 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
17128 HOST_WIDE_INT mask
= d
->mask
;
17130 /* It is expected that these dst built-in functions may have
17131 d->icode equal to CODE_FOR_nothing. */
17132 if ((mask
& builtin_mask
) != mask
)
17134 if (TARGET_DEBUG_BUILTIN
)
17135 fprintf (stderr
, "altivec_init_builtins, skip dst %s\n",
17139 def_builtin (d
->name
, void_ftype_pcvoid_int_int
, d
->code
);
17142 /* Initialize the predicates. */
17143 d
= bdesc_altivec_preds
;
17144 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
17146 machine_mode mode1
;
17148 HOST_WIDE_INT mask
= d
->mask
;
17150 if ((mask
& builtin_mask
) != mask
)
17152 if (TARGET_DEBUG_BUILTIN
)
17153 fprintf (stderr
, "altivec_init_builtins, skip predicate %s\n",
17158 if (rs6000_overloaded_builtin_p (d
->code
))
17162 /* Cannot define builtin if the instruction is disabled. */
17163 gcc_assert (d
->icode
!= CODE_FOR_nothing
);
17164 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
17170 type
= int_ftype_int_opaque_opaque
;
17173 type
= int_ftype_int_v2di_v2di
;
17176 type
= int_ftype_int_v4si_v4si
;
17179 type
= int_ftype_int_v8hi_v8hi
;
17182 type
= int_ftype_int_v16qi_v16qi
;
17185 type
= int_ftype_int_v4sf_v4sf
;
17188 type
= int_ftype_int_v2df_v2df
;
17191 gcc_unreachable ();
17194 def_builtin (d
->name
, type
, d
->code
);
17197 /* Initialize the abs* operators. */
17199 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
17201 machine_mode mode0
;
17203 HOST_WIDE_INT mask
= d
->mask
;
17205 if ((mask
& builtin_mask
) != mask
)
17207 if (TARGET_DEBUG_BUILTIN
)
17208 fprintf (stderr
, "altivec_init_builtins, skip abs %s\n",
17213 /* Cannot define builtin if the instruction is disabled. */
17214 gcc_assert (d
->icode
!= CODE_FOR_nothing
);
17215 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
17220 type
= v2di_ftype_v2di
;
17223 type
= v4si_ftype_v4si
;
17226 type
= v8hi_ftype_v8hi
;
17229 type
= v16qi_ftype_v16qi
;
17232 type
= v4sf_ftype_v4sf
;
17235 type
= v2df_ftype_v2df
;
17238 gcc_unreachable ();
17241 def_builtin (d
->name
, type
, d
->code
);
17244 /* Initialize target builtin that implements
17245 targetm.vectorize.builtin_mask_for_load. */
17247 decl
= add_builtin_function ("__builtin_altivec_mask_for_load",
17248 v16qi_ftype_long_pcvoid
,
17249 ALTIVEC_BUILTIN_MASK_FOR_LOAD
,
17250 BUILT_IN_MD
, NULL
, NULL_TREE
);
17251 TREE_READONLY (decl
) = 1;
17252 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17253 altivec_builtin_mask_for_load
= decl
;
17255 /* Access to the vec_init patterns. */
17256 ftype
= build_function_type_list (V4SI_type_node
, integer_type_node
,
17257 integer_type_node
, integer_type_node
,
17258 integer_type_node
, NULL_TREE
);
17259 def_builtin ("__builtin_vec_init_v4si", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V4SI
);
17261 ftype
= build_function_type_list (V8HI_type_node
, short_integer_type_node
,
17262 short_integer_type_node
,
17263 short_integer_type_node
,
17264 short_integer_type_node
,
17265 short_integer_type_node
,
17266 short_integer_type_node
,
17267 short_integer_type_node
,
17268 short_integer_type_node
, NULL_TREE
);
17269 def_builtin ("__builtin_vec_init_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V8HI
);
17271 ftype
= build_function_type_list (V16QI_type_node
, char_type_node
,
17272 char_type_node
, char_type_node
,
17273 char_type_node
, char_type_node
,
17274 char_type_node
, char_type_node
,
17275 char_type_node
, char_type_node
,
17276 char_type_node
, char_type_node
,
17277 char_type_node
, char_type_node
,
17278 char_type_node
, char_type_node
,
17279 char_type_node
, NULL_TREE
);
17280 def_builtin ("__builtin_vec_init_v16qi", ftype
,
17281 ALTIVEC_BUILTIN_VEC_INIT_V16QI
);
17283 ftype
= build_function_type_list (V4SF_type_node
, float_type_node
,
17284 float_type_node
, float_type_node
,
17285 float_type_node
, NULL_TREE
);
17286 def_builtin ("__builtin_vec_init_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V4SF
);
17288 /* VSX builtins. */
17289 ftype
= build_function_type_list (V2DF_type_node
, double_type_node
,
17290 double_type_node
, NULL_TREE
);
17291 def_builtin ("__builtin_vec_init_v2df", ftype
, VSX_BUILTIN_VEC_INIT_V2DF
);
17293 ftype
= build_function_type_list (V2DI_type_node
, intDI_type_node
,
17294 intDI_type_node
, NULL_TREE
);
17295 def_builtin ("__builtin_vec_init_v2di", ftype
, VSX_BUILTIN_VEC_INIT_V2DI
);
17297 /* Access to the vec_set patterns. */
17298 ftype
= build_function_type_list (V4SI_type_node
, V4SI_type_node
,
17300 integer_type_node
, NULL_TREE
);
17301 def_builtin ("__builtin_vec_set_v4si", ftype
, ALTIVEC_BUILTIN_VEC_SET_V4SI
);
17303 ftype
= build_function_type_list (V8HI_type_node
, V8HI_type_node
,
17305 integer_type_node
, NULL_TREE
);
17306 def_builtin ("__builtin_vec_set_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_SET_V8HI
);
17308 ftype
= build_function_type_list (V16QI_type_node
, V16QI_type_node
,
17310 integer_type_node
, NULL_TREE
);
17311 def_builtin ("__builtin_vec_set_v16qi", ftype
, ALTIVEC_BUILTIN_VEC_SET_V16QI
);
17313 ftype
= build_function_type_list (V4SF_type_node
, V4SF_type_node
,
17315 integer_type_node
, NULL_TREE
);
17316 def_builtin ("__builtin_vec_set_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_SET_V4SF
);
17318 ftype
= build_function_type_list (V2DF_type_node
, V2DF_type_node
,
17320 integer_type_node
, NULL_TREE
);
17321 def_builtin ("__builtin_vec_set_v2df", ftype
, VSX_BUILTIN_VEC_SET_V2DF
);
17323 ftype
= build_function_type_list (V2DI_type_node
, V2DI_type_node
,
17325 integer_type_node
, NULL_TREE
);
17326 def_builtin ("__builtin_vec_set_v2di", ftype
, VSX_BUILTIN_VEC_SET_V2DI
);
17328 /* Access to the vec_extract patterns. */
17329 ftype
= build_function_type_list (intSI_type_node
, V4SI_type_node
,
17330 integer_type_node
, NULL_TREE
);
17331 def_builtin ("__builtin_vec_ext_v4si", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V4SI
);
17333 ftype
= build_function_type_list (intHI_type_node
, V8HI_type_node
,
17334 integer_type_node
, NULL_TREE
);
17335 def_builtin ("__builtin_vec_ext_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V8HI
);
17337 ftype
= build_function_type_list (intQI_type_node
, V16QI_type_node
,
17338 integer_type_node
, NULL_TREE
);
17339 def_builtin ("__builtin_vec_ext_v16qi", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V16QI
);
17341 ftype
= build_function_type_list (float_type_node
, V4SF_type_node
,
17342 integer_type_node
, NULL_TREE
);
17343 def_builtin ("__builtin_vec_ext_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V4SF
);
17345 ftype
= build_function_type_list (double_type_node
, V2DF_type_node
,
17346 integer_type_node
, NULL_TREE
);
17347 def_builtin ("__builtin_vec_ext_v2df", ftype
, VSX_BUILTIN_VEC_EXT_V2DF
);
17349 ftype
= build_function_type_list (intDI_type_node
, V2DI_type_node
,
17350 integer_type_node
, NULL_TREE
);
17351 def_builtin ("__builtin_vec_ext_v2di", ftype
, VSX_BUILTIN_VEC_EXT_V2DI
);
17354 if (V1TI_type_node
)
17356 tree v1ti_ftype_long_pcvoid
17357 = build_function_type_list (V1TI_type_node
,
17358 long_integer_type_node
, pcvoid_type_node
,
17360 tree void_ftype_v1ti_long_pvoid
17361 = build_function_type_list (void_type_node
,
17362 V1TI_type_node
, long_integer_type_node
,
17363 pvoid_type_node
, NULL_TREE
);
17364 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid
,
17365 VSX_BUILTIN_LD_ELEMREV_V1TI
);
17366 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid
,
17367 VSX_BUILTIN_LXVD2X_V1TI
);
17368 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid
,
17369 VSX_BUILTIN_STXVD2X_V1TI
);
17370 ftype
= build_function_type_list (V1TI_type_node
, intTI_type_node
,
17371 NULL_TREE
, NULL_TREE
);
17372 def_builtin ("__builtin_vec_init_v1ti", ftype
, VSX_BUILTIN_VEC_INIT_V1TI
);
17373 ftype
= build_function_type_list (V1TI_type_node
, V1TI_type_node
,
17375 integer_type_node
, NULL_TREE
);
17376 def_builtin ("__builtin_vec_set_v1ti", ftype
, VSX_BUILTIN_VEC_SET_V1TI
);
17377 ftype
= build_function_type_list (intTI_type_node
, V1TI_type_node
,
17378 integer_type_node
, NULL_TREE
);
17379 def_builtin ("__builtin_vec_ext_v1ti", ftype
, VSX_BUILTIN_VEC_EXT_V1TI
);
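      /* Illustrative note: the vec_init/vec_set/vec_ext builtins defined
	 above back element-wise vector construction and access, e.g.
	 __builtin_vec_init_v4si (1, 2, 3, 4) builds a V4SImode value and
	 __builtin_vec_ext_v4si (v, 2) extracts element 2; the V1TI variants
	 are only created when the 128-bit integer vector type exists.  */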
17385 htm_init_builtins (void)
17387 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
17388 const struct builtin_description
*d
;
17392 for (i
= 0; i
< ARRAY_SIZE (bdesc_htm
); i
++, d
++)
17394 tree op
[MAX_HTM_OPERANDS
], type
;
17395 HOST_WIDE_INT mask
= d
->mask
;
17396 unsigned attr
= rs6000_builtin_info
[d
->code
].attr
;
17397 bool void_func
= (attr
& RS6000_BTC_VOID
);
17398 int attr_args
= (attr
& RS6000_BTC_TYPE_MASK
);
17400 tree gpr_type_node
;
17404 /* It is expected that these htm built-in functions may have
17405 d->icode equal to CODE_FOR_nothing. */
17407 if (TARGET_32BIT
&& TARGET_POWERPC64
)
17408 gpr_type_node
= long_long_unsigned_type_node
;
17410 gpr_type_node
= long_unsigned_type_node
;
17412 if (attr
& RS6000_BTC_SPR
)
17414 rettype
= gpr_type_node
;
17415 argtype
= gpr_type_node
;
17417 else if (d
->code
== HTM_BUILTIN_TABORTDC
17418 || d
->code
== HTM_BUILTIN_TABORTDCI
)
17420 rettype
= unsigned_type_node
;
17421 argtype
= gpr_type_node
;
17425 rettype
= unsigned_type_node
;
17426 argtype
= unsigned_type_node
;
17429 if ((mask
& builtin_mask
) != mask
)
17431 if (TARGET_DEBUG_BUILTIN
)
17432 fprintf (stderr
, "htm_builtin, skip binary %s\n", d
->name
);
17438 if (TARGET_DEBUG_BUILTIN
)
17439 fprintf (stderr
, "htm_builtin, bdesc_htm[%ld] no name\n",
17440 (long unsigned) i
);
17444 op
[nopnds
++] = (void_func
) ? void_type_node
: rettype
;
17446 if (attr_args
== RS6000_BTC_UNARY
)
17447 op
[nopnds
++] = argtype
;
17448 else if (attr_args
== RS6000_BTC_BINARY
)
17450 op
[nopnds
++] = argtype
;
17451 op
[nopnds
++] = argtype
;
17453 else if (attr_args
== RS6000_BTC_TERNARY
)
17455 op
[nopnds
++] = argtype
;
17456 op
[nopnds
++] = argtype
;
17457 op
[nopnds
++] = argtype
;
17463 type
= build_function_type_list (op
[0], NULL_TREE
);
17466 type
= build_function_type_list (op
[0], op
[1], NULL_TREE
);
17469 type
= build_function_type_list (op
[0], op
[1], op
[2], NULL_TREE
);
17472 type
= build_function_type_list (op
[0], op
[1], op
[2], op
[3],
17476 gcc_unreachable ();
17479 def_builtin (d
->name
, type
, d
->code
);
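/* Illustrative note: the loop above derives each HTM builtin's signature from
   its RS6000_BTC_* attributes, so e.g. a binary, non-void, non-SPR entry ends
   up as roughly "unsigned int f (unsigned int, unsigned int)", while the SPR
   accessors use the GPR-sized unsigned type chosen above.  */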
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}

/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
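/* Illustrative note: the hash above simply folds the four (mode, signedness)
   pairs into one value, e.g. for a signature V4SI (V4SI, V4SI) the entries
   are roughly

     mode[]  = { V4SImode, V4SImode, V4SImode, VOIDmode }
     uns_p[] = { 0, 0, 0, 0 }

   so builtins that share modes and signedness land in the same slot and
   reuse one FUNCTION_TYPE node.  */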
17514 /* Map types for builtin functions with an explicit return type and up to 3
17515 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
17516 of the argument. */
17518 builtin_function_type (machine_mode mode_ret
, machine_mode mode_arg0
,
17519 machine_mode mode_arg1
, machine_mode mode_arg2
,
17520 enum rs6000_builtins builtin
, const char *name
)
17522 struct builtin_hash_struct h
;
17523 struct builtin_hash_struct
*h2
;
17526 tree ret_type
= NULL_TREE
;
17527 tree arg_type
[3] = { NULL_TREE
, NULL_TREE
, NULL_TREE
};
17529 /* Create builtin_hash_table. */
17530 if (builtin_hash_table
== NULL
)
17531 builtin_hash_table
= hash_table
<builtin_hasher
>::create_ggc (1500);
17533 h
.type
= NULL_TREE
;
17534 h
.mode
[0] = mode_ret
;
17535 h
.mode
[1] = mode_arg0
;
17536 h
.mode
[2] = mode_arg1
;
17537 h
.mode
[3] = mode_arg2
;
17543 /* If the builtin is a type that produces unsigned results or takes unsigned
17544 arguments, and it is returned as a decl for the vectorizer (such as
17545 widening multiplies, permute), make sure the arguments and return value
17546 are type correct. */
17549 /* unsigned 1 argument functions. */
17550 case CRYPTO_BUILTIN_VSBOX
:
17551 case P8V_BUILTIN_VGBBD
:
17552 case MISC_BUILTIN_CDTBCD
:
17553 case MISC_BUILTIN_CBCDTD
:
17558 /* unsigned 2 argument functions. */
17559 case ALTIVEC_BUILTIN_VMULEUB
:
17560 case ALTIVEC_BUILTIN_VMULEUH
:
17561 case P8V_BUILTIN_VMULEUW
:
17562 case ALTIVEC_BUILTIN_VMULOUB
:
17563 case ALTIVEC_BUILTIN_VMULOUH
:
17564 case P8V_BUILTIN_VMULOUW
:
17565 case CRYPTO_BUILTIN_VCIPHER
:
17566 case CRYPTO_BUILTIN_VCIPHERLAST
:
17567 case CRYPTO_BUILTIN_VNCIPHER
:
17568 case CRYPTO_BUILTIN_VNCIPHERLAST
:
17569 case CRYPTO_BUILTIN_VPMSUMB
:
17570 case CRYPTO_BUILTIN_VPMSUMH
:
17571 case CRYPTO_BUILTIN_VPMSUMW
:
17572 case CRYPTO_BUILTIN_VPMSUMD
:
17573 case CRYPTO_BUILTIN_VPMSUM
:
17574 case MISC_BUILTIN_ADDG6S
:
17575 case MISC_BUILTIN_DIVWEU
:
17576 case MISC_BUILTIN_DIVDEU
:
17577 case VSX_BUILTIN_UDIV_V2DI
:
17578 case ALTIVEC_BUILTIN_VMAXUB
:
17579 case ALTIVEC_BUILTIN_VMINUB
:
17580 case ALTIVEC_BUILTIN_VMAXUH
:
17581 case ALTIVEC_BUILTIN_VMINUH
:
17582 case ALTIVEC_BUILTIN_VMAXUW
:
17583 case ALTIVEC_BUILTIN_VMINUW
:
17584 case P8V_BUILTIN_VMAXUD
:
17585 case P8V_BUILTIN_VMINUD
:
17591 /* unsigned 3 argument functions. */
17592 case ALTIVEC_BUILTIN_VPERM_16QI_UNS
:
17593 case ALTIVEC_BUILTIN_VPERM_8HI_UNS
:
17594 case ALTIVEC_BUILTIN_VPERM_4SI_UNS
:
17595 case ALTIVEC_BUILTIN_VPERM_2DI_UNS
:
17596 case ALTIVEC_BUILTIN_VSEL_16QI_UNS
:
17597 case ALTIVEC_BUILTIN_VSEL_8HI_UNS
:
17598 case ALTIVEC_BUILTIN_VSEL_4SI_UNS
:
17599 case ALTIVEC_BUILTIN_VSEL_2DI_UNS
:
17600 case VSX_BUILTIN_VPERM_16QI_UNS
:
17601 case VSX_BUILTIN_VPERM_8HI_UNS
:
17602 case VSX_BUILTIN_VPERM_4SI_UNS
:
17603 case VSX_BUILTIN_VPERM_2DI_UNS
:
17604 case VSX_BUILTIN_XXSEL_16QI_UNS
:
17605 case VSX_BUILTIN_XXSEL_8HI_UNS
:
17606 case VSX_BUILTIN_XXSEL_4SI_UNS
:
17607 case VSX_BUILTIN_XXSEL_2DI_UNS
:
17608 case CRYPTO_BUILTIN_VPERMXOR
:
17609 case CRYPTO_BUILTIN_VPERMXOR_V2DI
:
17610 case CRYPTO_BUILTIN_VPERMXOR_V4SI
:
17611 case CRYPTO_BUILTIN_VPERMXOR_V8HI
:
17612 case CRYPTO_BUILTIN_VPERMXOR_V16QI
:
17613 case CRYPTO_BUILTIN_VSHASIGMAW
:
17614 case CRYPTO_BUILTIN_VSHASIGMAD
:
17615 case CRYPTO_BUILTIN_VSHASIGMA
:
17622 /* signed permute functions with unsigned char mask. */
17623 case ALTIVEC_BUILTIN_VPERM_16QI
:
17624 case ALTIVEC_BUILTIN_VPERM_8HI
:
17625 case ALTIVEC_BUILTIN_VPERM_4SI
:
17626 case ALTIVEC_BUILTIN_VPERM_4SF
:
17627 case ALTIVEC_BUILTIN_VPERM_2DI
:
17628 case ALTIVEC_BUILTIN_VPERM_2DF
:
17629 case VSX_BUILTIN_VPERM_16QI
:
17630 case VSX_BUILTIN_VPERM_8HI
:
17631 case VSX_BUILTIN_VPERM_4SI
:
17632 case VSX_BUILTIN_VPERM_4SF
:
17633 case VSX_BUILTIN_VPERM_2DI
:
17634 case VSX_BUILTIN_VPERM_2DF
:
17638 /* unsigned args, signed return. */
17639 case VSX_BUILTIN_XVCVUXDSP
:
17640 case VSX_BUILTIN_XVCVUXDDP_UNS
:
17641 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF
:
17645 /* signed args, unsigned return. */
17646 case VSX_BUILTIN_XVCVDPUXDS_UNS
:
17647 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI
:
17648 case MISC_BUILTIN_UNPACK_TD
:
17649 case MISC_BUILTIN_UNPACK_V1TI
:
17653 /* unsigned arguments, bool return (compares). */
17654 case ALTIVEC_BUILTIN_VCMPEQUB
:
17655 case ALTIVEC_BUILTIN_VCMPEQUH
:
17656 case ALTIVEC_BUILTIN_VCMPEQUW
:
17657 case P8V_BUILTIN_VCMPEQUD
:
17658 case VSX_BUILTIN_CMPGE_U16QI
:
17659 case VSX_BUILTIN_CMPGE_U8HI
:
17660 case VSX_BUILTIN_CMPGE_U4SI
:
17661 case VSX_BUILTIN_CMPGE_U2DI
:
17662 case ALTIVEC_BUILTIN_VCMPGTUB
:
17663 case ALTIVEC_BUILTIN_VCMPGTUH
:
17664 case ALTIVEC_BUILTIN_VCMPGTUW
:
17665 case P8V_BUILTIN_VCMPGTUD
:
17670 /* unsigned arguments for 128-bit pack instructions. */
17671 case MISC_BUILTIN_PACK_TD
:
17672 case MISC_BUILTIN_PACK_V1TI
:
17677 /* unsigned second arguments (vector shift right). */
17678 case ALTIVEC_BUILTIN_VSRB
:
17679 case ALTIVEC_BUILTIN_VSRH
:
17680 case ALTIVEC_BUILTIN_VSRW
:
17681 case P8V_BUILTIN_VSRD
:
17689 /* Figure out how many args are present. */
17690 while (num_args
> 0 && h
.mode
[num_args
] == VOIDmode
)
17693 ret_type
= builtin_mode_to_type
[h
.mode
[0]][h
.uns_p
[0]];
17694 if (!ret_type
&& h
.uns_p
[0])
17695 ret_type
= builtin_mode_to_type
[h
.mode
[0]][0];
17698 fatal_error (input_location
,
17699 "internal error: builtin function %qs had an unexpected "
17700 "return type %qs", name
, GET_MODE_NAME (h
.mode
[0]));
17702 for (i
= 0; i
< (int) ARRAY_SIZE (arg_type
); i
++)
17703 arg_type
[i
] = NULL_TREE
;
17705 for (i
= 0; i
< num_args
; i
++)
17707 int m
= (int) h
.mode
[i
+1];
17708 int uns_p
= h
.uns_p
[i
+1];
17710 arg_type
[i
] = builtin_mode_to_type
[m
][uns_p
];
17711 if (!arg_type
[i
] && uns_p
)
17712 arg_type
[i
] = builtin_mode_to_type
[m
][0];
17715 fatal_error (input_location
,
17716 "internal error: builtin function %qs, argument %d "
17717 "had unexpected argument type %qs", name
, i
,
17718 GET_MODE_NAME (m
));
17721 builtin_hash_struct
**found
= builtin_hash_table
->find_slot (&h
, INSERT
);
17722 if (*found
== NULL
)
17724 h2
= ggc_alloc
<builtin_hash_struct
> ();
17728 h2
->type
= build_function_type_list (ret_type
, arg_type
[0], arg_type
[1],
17729 arg_type
[2], NULL_TREE
);
17732 return (*found
)->type
;
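/* Illustrative note: builtin_function_type memoizes on the (return mode,
   argument modes, signedness) tuple above, so e.g. every unsigned
   V16QI x V16QI -> V16QI builtin (vmaxub, vminub, ...) gets back the very
   same tree type node from the hash table instead of a fresh
   build_function_type_list each time.  */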
17736 rs6000_common_init_builtins (void)
17738 const struct builtin_description
*d
;
17741 tree opaque_ftype_opaque
= NULL_TREE
;
17742 tree opaque_ftype_opaque_opaque
= NULL_TREE
;
17743 tree opaque_ftype_opaque_opaque_opaque
= NULL_TREE
;
17744 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
17746 /* Create Altivec and VSX builtins on machines with at least the
17747 general purpose extensions (970 and newer) to allow the use of
17748 the target attribute. */
17750 if (TARGET_EXTRA_BUILTINS
)
17751 builtin_mask
|= RS6000_BTM_COMMON
;
17753 /* Add the ternary operators. */
17755 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
17758 HOST_WIDE_INT mask
= d
->mask
;
17760 if ((mask
& builtin_mask
) != mask
)
17762 if (TARGET_DEBUG_BUILTIN
)
17763 fprintf (stderr
, "rs6000_builtin, skip ternary %s\n", d
->name
);
17767 if (rs6000_overloaded_builtin_p (d
->code
))
17769 if (! (type
= opaque_ftype_opaque_opaque_opaque
))
17770 type
= opaque_ftype_opaque_opaque_opaque
17771 = build_function_type_list (opaque_V4SI_type_node
,
17772 opaque_V4SI_type_node
,
17773 opaque_V4SI_type_node
,
17774 opaque_V4SI_type_node
,
17779 enum insn_code icode
= d
->icode
;
17782 if (TARGET_DEBUG_BUILTIN
)
17783 fprintf (stderr
, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17789 if (icode
== CODE_FOR_nothing
)
17791 if (TARGET_DEBUG_BUILTIN
)
17792 fprintf (stderr
, "rs6000_builtin, skip ternary %s (no code)\n",
17798 type
= builtin_function_type (insn_data
[icode
].operand
[0].mode
,
17799 insn_data
[icode
].operand
[1].mode
,
17800 insn_data
[icode
].operand
[2].mode
,
17801 insn_data
[icode
].operand
[3].mode
,
17805 def_builtin (d
->name
, type
, d
->code
);
17808 /* Add the binary operators. */
17810 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
17812 machine_mode mode0
, mode1
, mode2
;
17814 HOST_WIDE_INT mask
= d
->mask
;
17816 if ((mask
& builtin_mask
) != mask
)
17818 if (TARGET_DEBUG_BUILTIN
)
17819 fprintf (stderr
, "rs6000_builtin, skip binary %s\n", d
->name
);
17823 if (rs6000_overloaded_builtin_p (d
->code
))
17825 if (! (type
= opaque_ftype_opaque_opaque
))
17826 type
= opaque_ftype_opaque_opaque
17827 = build_function_type_list (opaque_V4SI_type_node
,
17828 opaque_V4SI_type_node
,
17829 opaque_V4SI_type_node
,
17834 enum insn_code icode
= d
->icode
;
17837 if (TARGET_DEBUG_BUILTIN
)
17838 fprintf (stderr
, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17844 if (icode
== CODE_FOR_nothing
)
17846 if (TARGET_DEBUG_BUILTIN
)
17847 fprintf (stderr
, "rs6000_builtin, skip binary %s (no code)\n",
17853 mode0
= insn_data
[icode
].operand
[0].mode
;
17854 mode1
= insn_data
[icode
].operand
[1].mode
;
17855 mode2
= insn_data
[icode
].operand
[2].mode
;
17857 type
= builtin_function_type (mode0
, mode1
, mode2
, VOIDmode
,
17861 def_builtin (d
->name
, type
, d
->code
);
17864 /* Add the simple unary operators. */
17866 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
17868 machine_mode mode0
, mode1
;
17870 HOST_WIDE_INT mask
= d
->mask
;
17872 if ((mask
& builtin_mask
) != mask
)
17874 if (TARGET_DEBUG_BUILTIN
)
17875 fprintf (stderr
, "rs6000_builtin, skip unary %s\n", d
->name
);
17879 if (rs6000_overloaded_builtin_p (d
->code
))
17881 if (! (type
= opaque_ftype_opaque
))
17882 type
= opaque_ftype_opaque
17883 = build_function_type_list (opaque_V4SI_type_node
,
17884 opaque_V4SI_type_node
,
17889 enum insn_code icode
= d
->icode
;
17892 if (TARGET_DEBUG_BUILTIN
)
17893 fprintf (stderr
, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17899 if (icode
== CODE_FOR_nothing
)
17901 if (TARGET_DEBUG_BUILTIN
)
17902 fprintf (stderr
, "rs6000_builtin, skip unary %s (no code)\n",
17908 mode0
= insn_data
[icode
].operand
[0].mode
;
17909 mode1
= insn_data
[icode
].operand
[1].mode
;
17911 type
= builtin_function_type (mode0
, mode1
, VOIDmode
, VOIDmode
,
17915 def_builtin (d
->name
, type
, d
->code
);
17918 /* Add the simple no-argument operators. */
17920 for (i
= 0; i
< ARRAY_SIZE (bdesc_0arg
); i
++, d
++)
17922 machine_mode mode0
;
17924 HOST_WIDE_INT mask
= d
->mask
;
17926 if ((mask
& builtin_mask
) != mask
)
17928 if (TARGET_DEBUG_BUILTIN
)
17929 fprintf (stderr
, "rs6000_builtin, skip no-argument %s\n", d
->name
);
17932 if (rs6000_overloaded_builtin_p (d
->code
))
17934 if (!opaque_ftype_opaque
)
17935 opaque_ftype_opaque
17936 = build_function_type_list (opaque_V4SI_type_node
, NULL_TREE
);
17937 type
= opaque_ftype_opaque
;
17941 enum insn_code icode
= d
->icode
;
17944 if (TARGET_DEBUG_BUILTIN
)
17945 fprintf (stderr
, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17946 (long unsigned) i
);
17949 if (icode
== CODE_FOR_nothing
)
17951 if (TARGET_DEBUG_BUILTIN
)
17953 "rs6000_builtin, skip no-argument %s (no code)\n",
17957 mode0
= insn_data
[icode
].operand
[0].mode
;
17958 type
= builtin_function_type (mode0
, VOIDmode
, VOIDmode
, VOIDmode
,
17961 def_builtin (d
->name
, type
, d
->code
);
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */
static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
	{
	  set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
	  set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
	  set_optab_libfunc (ne_optab, mode, "__gcc_qne");
	  set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
	  set_optab_libfunc (ge_optab, mode, "__gcc_qge");
	  set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
	  set_optab_libfunc (le_optab, mode, "__gcc_qle");
	  set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

	  set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
	  set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
	  set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
	  set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
	  set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
	  set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
	  set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
	  set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
	}
    }
}
18026 /* Create a decl for either complex long double multiply or complex long double
18027 divide when long double is IEEE 128-bit floating point. We can't use
18028 __multc3 and __divtc3 because the original long double using IBM extended
18029 double used those names. The complex multiply/divide functions are encoded
18030 as builtin functions with a complex result and 4 scalar inputs. */
18033 create_complex_muldiv (const char *name
, built_in_function fncode
, tree fntype
)
18035 tree fndecl
= add_builtin_function (name
, fntype
, fncode
, BUILT_IN_NORMAL
,
18038 set_builtin_decl (fncode
, fndecl
, true);
18040 if (TARGET_DEBUG_BUILTIN
)
18041 fprintf (stderr
, "create complex %s, fncode: %d\n", name
, (int) fncode
);
18046 /* Set up IEEE 128-bit floating point routines. Use different names if the
18047 arguments can be passed in a vector register. The historical PowerPC
18048 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18049 continue to use that if we aren't using vector registers to pass IEEE
18050 128-bit floating point. */
18053 init_float128_ieee (machine_mode mode
)
18055 if (FLOAT128_VECTOR_P (mode
))
18057 static bool complex_muldiv_init_p
= false;
18059 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18060 we have clone or target attributes, this will be called a second
18061 time. We want to create the built-in function only once. */
18062 if (mode
== TFmode
&& TARGET_IEEEQUAD
&& !complex_muldiv_init_p
)
18064 complex_muldiv_init_p
= true;
18065 built_in_function fncode_mul
=
18066 (built_in_function
) (BUILT_IN_COMPLEX_MUL_MIN
+ TCmode
18067 - MIN_MODE_COMPLEX_FLOAT
);
18068 built_in_function fncode_div
=
18069 (built_in_function
) (BUILT_IN_COMPLEX_DIV_MIN
+ TCmode
18070 - MIN_MODE_COMPLEX_FLOAT
);
18072 tree fntype
= build_function_type_list (complex_long_double_type_node
,
18073 long_double_type_node
,
18074 long_double_type_node
,
18075 long_double_type_node
,
18076 long_double_type_node
,
18079 create_complex_muldiv ("__mulkc3", fncode_mul
, fntype
);
18080 create_complex_muldiv ("__divkc3", fncode_div
, fntype
);
18083 set_optab_libfunc (add_optab
, mode
, "__addkf3");
18084 set_optab_libfunc (sub_optab
, mode
, "__subkf3");
18085 set_optab_libfunc (neg_optab
, mode
, "__negkf2");
18086 set_optab_libfunc (smul_optab
, mode
, "__mulkf3");
18087 set_optab_libfunc (sdiv_optab
, mode
, "__divkf3");
18088 set_optab_libfunc (sqrt_optab
, mode
, "__sqrtkf2");
18089 set_optab_libfunc (abs_optab
, mode
, "__abskf2");
18090 set_optab_libfunc (powi_optab
, mode
, "__powikf2");
18092 set_optab_libfunc (eq_optab
, mode
, "__eqkf2");
18093 set_optab_libfunc (ne_optab
, mode
, "__nekf2");
18094 set_optab_libfunc (gt_optab
, mode
, "__gtkf2");
18095 set_optab_libfunc (ge_optab
, mode
, "__gekf2");
18096 set_optab_libfunc (lt_optab
, mode
, "__ltkf2");
18097 set_optab_libfunc (le_optab
, mode
, "__lekf2");
18098 set_optab_libfunc (unord_optab
, mode
, "__unordkf2");
18100 set_conv_libfunc (sext_optab
, mode
, SFmode
, "__extendsfkf2");
18101 set_conv_libfunc (sext_optab
, mode
, DFmode
, "__extenddfkf2");
18102 set_conv_libfunc (trunc_optab
, SFmode
, mode
, "__trunckfsf2");
18103 set_conv_libfunc (trunc_optab
, DFmode
, mode
, "__trunckfdf2");
18105 set_conv_libfunc (sext_optab
, mode
, IFmode
, "__trunctfkf2");
18106 if (mode
!= TFmode
&& FLOAT128_IBM_P (TFmode
))
18107 set_conv_libfunc (sext_optab
, mode
, TFmode
, "__trunctfkf2");
18109 set_conv_libfunc (trunc_optab
, IFmode
, mode
, "__extendkftf2");
18110 if (mode
!= TFmode
&& FLOAT128_IBM_P (TFmode
))
18111 set_conv_libfunc (trunc_optab
, TFmode
, mode
, "__extendkftf2");
18113 set_conv_libfunc (sext_optab
, mode
, SDmode
, "__dpd_extendsdkf");
18114 set_conv_libfunc (sext_optab
, mode
, DDmode
, "__dpd_extendddkf");
18115 set_conv_libfunc (trunc_optab
, mode
, TDmode
, "__dpd_trunctdkf");
18116 set_conv_libfunc (trunc_optab
, SDmode
, mode
, "__dpd_trunckfsd");
18117 set_conv_libfunc (trunc_optab
, DDmode
, mode
, "__dpd_trunckfdd");
18118 set_conv_libfunc (sext_optab
, TDmode
, mode
, "__dpd_extendkftd");
18120 set_conv_libfunc (sfix_optab
, SImode
, mode
, "__fixkfsi");
18121 set_conv_libfunc (ufix_optab
, SImode
, mode
, "__fixunskfsi");
18122 set_conv_libfunc (sfix_optab
, DImode
, mode
, "__fixkfdi");
18123 set_conv_libfunc (ufix_optab
, DImode
, mode
, "__fixunskfdi");
18125 set_conv_libfunc (sfloat_optab
, mode
, SImode
, "__floatsikf");
18126 set_conv_libfunc (ufloat_optab
, mode
, SImode
, "__floatunsikf");
18127 set_conv_libfunc (sfloat_optab
, mode
, DImode
, "__floatdikf");
18128 set_conv_libfunc (ufloat_optab
, mode
, DImode
, "__floatundikf");
18130 if (TARGET_POWERPC64
)
18132 set_conv_libfunc (sfix_optab
, TImode
, mode
, "__fixkfti");
18133 set_conv_libfunc (ufix_optab
, TImode
, mode
, "__fixunskfti");
18134 set_conv_libfunc (sfloat_optab
, mode
, TImode
, "__floattikf");
18135 set_conv_libfunc (ufloat_optab
, mode
, TImode
, "__floatuntikf");
18141 set_optab_libfunc (add_optab
, mode
, "_q_add");
18142 set_optab_libfunc (sub_optab
, mode
, "_q_sub");
18143 set_optab_libfunc (neg_optab
, mode
, "_q_neg");
18144 set_optab_libfunc (smul_optab
, mode
, "_q_mul");
18145 set_optab_libfunc (sdiv_optab
, mode
, "_q_div");
18146 if (TARGET_PPC_GPOPT
)
18147 set_optab_libfunc (sqrt_optab
, mode
, "_q_sqrt");
18149 set_optab_libfunc (eq_optab
, mode
, "_q_feq");
18150 set_optab_libfunc (ne_optab
, mode
, "_q_fne");
18151 set_optab_libfunc (gt_optab
, mode
, "_q_fgt");
18152 set_optab_libfunc (ge_optab
, mode
, "_q_fge");
18153 set_optab_libfunc (lt_optab
, mode
, "_q_flt");
18154 set_optab_libfunc (le_optab
, mode
, "_q_fle");
18156 set_conv_libfunc (sext_optab
, mode
, SFmode
, "_q_stoq");
18157 set_conv_libfunc (sext_optab
, mode
, DFmode
, "_q_dtoq");
18158 set_conv_libfunc (trunc_optab
, SFmode
, mode
, "_q_qtos");
18159 set_conv_libfunc (trunc_optab
, DFmode
, mode
, "_q_qtod");
18160 set_conv_libfunc (sfix_optab
, SImode
, mode
, "_q_qtoi");
18161 set_conv_libfunc (ufix_optab
, SImode
, mode
, "_q_qtou");
18162 set_conv_libfunc (sfloat_optab
, mode
, SImode
, "_q_itoq");
18163 set_conv_libfunc (ufloat_optab
, mode
, SImode
, "_q_utoq");
static void
rs6000_init_libfuncs (void)
{
  /* __float128 support.  */
  if (TARGET_FLOAT128_TYPE)
    {
      init_float128_ibm (IFmode);
      init_float128_ieee (KFmode);
    }

  /* AIX/Darwin/64-bit Linux quad floating point routines.  */
  if (TARGET_LONG_DOUBLE_128)
    {
      if (!TARGET_IEEEQUAD)
	init_float128_ibm (TFmode);

      /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
      else
	init_float128_ieee (TFmode);
    }
}
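/* Illustrative summary: on a typical 64-bit Linux configuration this wires
   IFmode (IBM double-double) to the __gcc_q* routines, KFmode (IEEE binary128)
   to the __*kf* routines, and TFmode to whichever of the two matches the
   selected -mabi={ibm,ieee}longdouble long double format.  */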
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */

void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
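/* Illustrative example: for DOT == 1 with CCREG being CR0, the PARALLEL
   emitted above has roughly the shape

     (parallel [(set ccreg (compare:CC src (const_int 0)))
		(clobber dst)])

   i.e. a record-form insn where only the CR0 result is live; for DOT == 2
   the clobber is replaced by a real set of DST.  */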
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
18263 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18264 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18265 not zero, store there the bit offset (counted from the right) where
18266 the single stretch of 1 bits begins; and similarly for B, the bit
18267 offset where it ends. */
18270 rs6000_is_valid_mask (rtx mask
, int *b
, int *e
, machine_mode mode
)
18272 unsigned HOST_WIDE_INT val
= INTVAL (mask
);
18273 unsigned HOST_WIDE_INT bit
;
18275 int n
= GET_MODE_PRECISION (mode
);
18277 if (mode
!= DImode
&& mode
!= SImode
)
18280 if (INTVAL (mask
) >= 0)
18283 ne
= exact_log2 (bit
);
18284 nb
= exact_log2 (val
+ bit
);
18286 else if (val
+ 1 == 0)
18295 nb
= exact_log2 (bit
);
18296 ne
= exact_log2 (val
+ bit
);
18301 ne
= exact_log2 (bit
);
18302 if (val
+ bit
== 0)
18310 if (nb
< 0 || ne
< 0 || nb
>= n
|| ne
>= n
)
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */

bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
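/* Illustrative example: continuing the 0x0000ff00 case, nb == 15 and ne == 8,
   so the SImode branch above sets operands[3] = 16 and operands[4] = 23 and
   returns "rlwinm %0,%1,0,%3,%4", a rotate-and-mask that keeps exactly bits
   8..15 of the source.  */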
18383 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18384 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18385 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18388 rs6000_is_valid_shift_mask (rtx mask
, rtx shift
, machine_mode mode
)
18392 if (!rs6000_is_valid_mask (mask
, &nb
, &ne
, mode
))
18395 int n
= GET_MODE_PRECISION (mode
);
18398 if (CONST_INT_P (XEXP (shift
, 1)))
18400 sh
= INTVAL (XEXP (shift
, 1));
18401 if (sh
< 0 || sh
>= n
)
18405 rtx_code code
= GET_CODE (shift
);
18407 /* Convert any shift by 0 to a rotate, to simplify below code. */
18411 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18412 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& ne
>= sh
)
18414 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& nb
< sh
)
18420 /* DImode rotates need rld*. */
18421 if (mode
== DImode
&& code
== ROTATE
)
18422 return (nb
== 63 || ne
== 0 || ne
== sh
);
18424 /* SImode rotates need rlw*. */
18425 if (mode
== SImode
&& code
== ROTATE
)
18426 return (nb
< 32 && ne
< 32 && sh
< 32);
18428 /* Wrap-around masks are only okay for rotates. */
18432 /* Variable shifts are only okay for rotates. */
18436 /* Don't allow ASHIFT if the mask is wrong for that. */
18437 if (code
== ASHIFT
&& ne
< sh
)
18440 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18441 if the mask is wrong for that. */
18442 if (nb
< 32 && ne
< 32 && sh
< 32
18443 && !(code
== LSHIFTRT
&& nb
>= 32 - sh
))
18446 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18447 if the mask is wrong for that. */
18448 if (code
== LSHIFTRT
)
18450 if (nb
== 63 || ne
== 0 || ne
== sh
)
18451 return !(code
== LSHIFTRT
&& nb
>= sh
);
18456 /* Return the instruction template for a shift with mask in mode MODE, with
18457 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18460 rs6000_insn_for_shift_mask (machine_mode mode
, rtx
*operands
, bool dot
)
18464 if (!rs6000_is_valid_mask (operands
[3], &nb
, &ne
, mode
))
18465 gcc_unreachable ();
18467 if (mode
== DImode
&& ne
== 0)
18469 if (GET_CODE (operands
[4]) == LSHIFTRT
&& INTVAL (operands
[2]))
18470 operands
[2] = GEN_INT (64 - INTVAL (operands
[2]));
18471 operands
[3] = GEN_INT (63 - nb
);
18473 return "rld%I2cl. %0,%1,%2,%3";
18474 return "rld%I2cl %0,%1,%2,%3";
18477 if (mode
== DImode
&& nb
== 63)
18479 operands
[3] = GEN_INT (63 - ne
);
18481 return "rld%I2cr. %0,%1,%2,%3";
18482 return "rld%I2cr %0,%1,%2,%3";
18486 && GET_CODE (operands
[4]) != LSHIFTRT
18487 && CONST_INT_P (operands
[2])
18488 && ne
== INTVAL (operands
[2]))
18490 operands
[3] = GEN_INT (63 - nb
);
18492 return "rld%I2c. %0,%1,%2,%3";
18493 return "rld%I2c %0,%1,%2,%3";
18496 if (nb
< 32 && ne
< 32)
18498 if (GET_CODE (operands
[4]) == LSHIFTRT
&& INTVAL (operands
[2]))
18499 operands
[2] = GEN_INT (32 - INTVAL (operands
[2]));
18500 operands
[3] = GEN_INT (31 - nb
);
18501 operands
[4] = GEN_INT (31 - ne
);
18502 /* This insn can also be a 64-bit rotate with mask that really makes
18503 it just a shift right (with mask); the %h below are to adjust for
18504 that situation (shift count is >= 32 in that case). */
18506 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18507 return "rlw%I2nm %0,%1,%h2,%3,%4";
18510 gcc_unreachable ();
18513 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18514 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18515 ASHIFT, or LSHIFTRT) in mode MODE. */
18518 rs6000_is_valid_insert_mask (rtx mask
, rtx shift
, machine_mode mode
)
18522 if (!rs6000_is_valid_mask (mask
, &nb
, &ne
, mode
))
18525 int n
= GET_MODE_PRECISION (mode
);
18527 int sh
= INTVAL (XEXP (shift
, 1));
18528 if (sh
< 0 || sh
>= n
)
18531 rtx_code code
= GET_CODE (shift
);
18533 /* Convert any shift by 0 to a rotate, to simplify below code. */
18537 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18538 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& ne
>= sh
)
18540 if (code
== ROTATE
&& sh
>= 0 && nb
>= ne
&& nb
< sh
)
18546 /* DImode rotates need rldimi. */
18547 if (mode
== DImode
&& code
== ROTATE
)
18550 /* SImode rotates need rlwimi. */
18551 if (mode
== SImode
&& code
== ROTATE
)
18552 return (nb
< 32 && ne
< 32 && sh
< 32);
18554 /* Wrap-around masks are only okay for rotates. */
18558 /* Don't allow ASHIFT if the mask is wrong for that. */
18559 if (code
== ASHIFT
&& ne
< sh
)
18562 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18563 if the mask is wrong for that. */
18564 if (nb
< 32 && ne
< 32 && sh
< 32
18565 && !(code
== LSHIFTRT
&& nb
>= 32 - sh
))
18568 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18569 if the mask is wrong for that. */
18570 if (code
== LSHIFTRT
)
18573 return !(code
== LSHIFTRT
&& nb
>= sh
);
18578 /* Return the instruction template for an insert with mask in mode MODE, with
18579 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18582 rs6000_insn_for_insert_mask (machine_mode mode
, rtx
*operands
, bool dot
)
18586 if (!rs6000_is_valid_mask (operands
[3], &nb
, &ne
, mode
))
18587 gcc_unreachable ();
18589 /* Prefer rldimi because rlwimi is cracked. */
18590 if (TARGET_POWERPC64
18591 && (!dot
|| mode
== DImode
)
18592 && GET_CODE (operands
[4]) != LSHIFTRT
18593 && ne
== INTVAL (operands
[2]))
18595 operands
[3] = GEN_INT (63 - nb
);
18597 return "rldimi. %0,%1,%2,%3";
18598 return "rldimi %0,%1,%2,%3";
18601 if (nb
< 32 && ne
< 32)
18603 if (GET_CODE (operands
[4]) == LSHIFTRT
&& INTVAL (operands
[2]))
18604 operands
[2] = GEN_INT (32 - INTVAL (operands
[2]));
18605 operands
[3] = GEN_INT (31 - nb
);
18606 operands
[4] = GEN_INT (31 - ne
);
18608 return "rlwimi. %0,%1,%2,%3,%4";
18609 return "rlwimi %0,%1,%2,%3,%4";
18612 gcc_unreachable ();
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */

bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;
     2) ori[s];xori[s].

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
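/* Illustrative example: for the SImode constant 0xf0f0 the computation above
   gives bit1 = 0x10, bit2 = 0x100, val1 = 0xf000 and bit3 = 0x1000, so the
   candidate mask val + bit3 - bit2 == 0xfff0 is a single stretch of ones and
   the AND can be done with two and-with-mask instructions (roughly
   0xfffff0ff followed by 0xfff0).  */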
18641 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18642 If EXPAND is true, split rotate-and-mask instructions we generate to
18643 their constituent parts as well (this is used during expand); if DOT
18644 is 1, make the last insn a record-form instruction clobbering the
18645 destination GPR and setting the CC reg (from operands[3]); if 2, set
18646 that GPR as well as the CC reg. */
18649 rs6000_emit_2insn_and (machine_mode mode
, rtx
*operands
, bool expand
, int dot
)
18651 gcc_assert (!(expand
&& dot
));
18653 unsigned HOST_WIDE_INT val
= INTVAL (operands
[2]);
18655 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18656 shift right. This generates better code than doing the masks without
18657 shifts, or shifting first right and then left. */
18659 if (rs6000_is_valid_mask (operands
[2], &nb
, &ne
, mode
) && nb
>= ne
)
18661 gcc_assert (mode
== DImode
);
18663 int shift
= 63 - nb
;
18666 rtx tmp1
= gen_reg_rtx (DImode
);
18667 rtx tmp2
= gen_reg_rtx (DImode
);
18668 emit_insn (gen_ashldi3 (tmp1
, operands
[1], GEN_INT (shift
)));
18669 emit_insn (gen_anddi3 (tmp2
, tmp1
, GEN_INT (val
<< shift
)));
18670 emit_insn (gen_lshrdi3 (operands
[0], tmp2
, GEN_INT (shift
)));
18674 rtx tmp
= gen_rtx_ASHIFT (mode
, operands
[1], GEN_INT (shift
));
18675 tmp
= gen_rtx_AND (mode
, tmp
, GEN_INT (val
<< shift
));
18676 emit_move_insn (operands
[0], tmp
);
18677 tmp
= gen_rtx_LSHIFTRT (mode
, operands
[0], GEN_INT (shift
));
18678 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
18683 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18684 that does the rest. */
18685 unsigned HOST_WIDE_INT bit1
= val
& -val
;
18686 unsigned HOST_WIDE_INT bit2
= (val
+ bit1
) & ~val
;
18687 unsigned HOST_WIDE_INT val1
= (val
+ bit1
) & val
;
18688 unsigned HOST_WIDE_INT bit3
= val1
& -val1
;
18690 unsigned HOST_WIDE_INT mask1
= -bit3
+ bit2
- 1;
18691 unsigned HOST_WIDE_INT mask2
= val
+ bit3
- bit2
;
18693 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2
), mode
));
18695 /* Two "no-rotate"-and-mask instructions, for SImode. */
18696 if (rs6000_is_valid_and_mask (GEN_INT (mask1
), mode
))
18698 gcc_assert (mode
== SImode
);
18700 rtx reg
= expand
? gen_reg_rtx (mode
) : operands
[0];
18701 rtx tmp
= gen_rtx_AND (mode
, operands
[1], GEN_INT (mask1
));
18702 emit_move_insn (reg
, tmp
);
18703 tmp
= gen_rtx_AND (mode
, reg
, GEN_INT (mask2
));
18704 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
18708 gcc_assert (mode
== DImode
);
18710 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18711 insns; we have to do the first in SImode, because it wraps. */
18712 if (mask2
<= 0xffffffff
18713 && rs6000_is_valid_and_mask (GEN_INT (mask1
), SImode
))
18715 rtx reg
= expand
? gen_reg_rtx (mode
) : operands
[0];
18716 rtx tmp
= gen_rtx_AND (SImode
, gen_lowpart (SImode
, operands
[1]),
18718 rtx reg_low
= gen_lowpart (SImode
, reg
);
18719 emit_move_insn (reg_low
, tmp
);
18720 tmp
= gen_rtx_AND (mode
, reg
, GEN_INT (mask2
));
18721 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
18725 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18726 at the top end), rotate back and clear the other hole. */
18727 int right
= exact_log2 (bit3
);
18728 int left
= 64 - right
;
18730 /* Rotate the mask too. */
18731 mask1
= (mask1
>> right
) | ((bit2
- 1) << left
);
18735 rtx tmp1
= gen_reg_rtx (DImode
);
18736 rtx tmp2
= gen_reg_rtx (DImode
);
18737 rtx tmp3
= gen_reg_rtx (DImode
);
18738 emit_insn (gen_rotldi3 (tmp1
, operands
[1], GEN_INT (left
)));
18739 emit_insn (gen_anddi3 (tmp2
, tmp1
, GEN_INT (mask1
)));
18740 emit_insn (gen_rotldi3 (tmp3
, tmp2
, GEN_INT (right
)));
18741 emit_insn (gen_anddi3 (operands
[0], tmp3
, GEN_INT (mask2
)));
18745 rtx tmp
= gen_rtx_ROTATE (mode
, operands
[1], GEN_INT (left
));
18746 tmp
= gen_rtx_AND (mode
, tmp
, GEN_INT (mask1
));
18747 emit_move_insn (operands
[0], tmp
);
18748 tmp
= gen_rtx_ROTATE (mode
, operands
[0], GEN_INT (right
));
18749 tmp
= gen_rtx_AND (mode
, tmp
, GEN_INT (mask2
));
18750 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
18772 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18773 addr1 and addr2 must be in consecutive memory locations
18774 (addr2 == addr1 + 8). */
18777 mems_ok_for_quad_peep (rtx mem1
, rtx mem2
)
18780 unsigned int reg1
, reg2
;
18781 int offset1
, offset2
;
18783 /* The mems cannot be volatile. */
18784 if (MEM_VOLATILE_P (mem1
) || MEM_VOLATILE_P (mem2
))
18787 addr1
= XEXP (mem1
, 0);
18788 addr2
= XEXP (mem2
, 0);
18790 /* Extract an offset (if used) from the first addr. */
18791 if (GET_CODE (addr1
) == PLUS
)
18793 /* If not a REG, return zero. */
18794 if (GET_CODE (XEXP (addr1
, 0)) != REG
)
18798 reg1
= REGNO (XEXP (addr1
, 0));
18799 /* The offset must be constant! */
18800 if (GET_CODE (XEXP (addr1
, 1)) != CONST_INT
)
18802 offset1
= INTVAL (XEXP (addr1
, 1));
18805 else if (GET_CODE (addr1
) != REG
)
18809 reg1
= REGNO (addr1
);
18810 /* This was a simple (mem (reg)) expression. Offset is 0. */
18814 /* And now for the second addr. */
18815 if (GET_CODE (addr2
) == PLUS
)
18817 /* If not a REG, return zero. */
18818 if (GET_CODE (XEXP (addr2
, 0)) != REG
)
18822 reg2
= REGNO (XEXP (addr2
, 0));
18823 /* The offset must be constant. */
18824 if (GET_CODE (XEXP (addr2
, 1)) != CONST_INT
)
18826 offset2
= INTVAL (XEXP (addr2
, 1));
18829 else if (GET_CODE (addr2
) != REG
)
18833 reg2
= REGNO (addr2
);
18834 /* This was a simple (mem (reg)) expression. Offset is 0. */
18838 /* Both of these must have the same base register. */
18842 /* The offset for the second addr must be 8 more than the first addr. */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     insns.  */
  return 1;
}
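/* Illustrative example: the lfq/stfq peephole using these predicates can
   combine a pair of adjacent accesses such as

     (mem:DF (reg:DI 3))
     (mem:DF (plus:DI (reg:DI 3) (const_int 8)))

   provided the FP registers involved are also consecutive
   (registers_ok_for_quad_peep).  */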
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode, in all other cases we can use the same mode.  */
static machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;
  return mode;
}
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was chosen.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_completed)
	return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int)rclass];
}
/* Helper function to return the cost of adding a TOC entry address.  */

static inline int
rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
{
  int ret;

  if (TARGET_CMODEL != CMODEL_SMALL)
    ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;

  else
    ret = (TARGET_MINIMAL_TOC) ? 6 : 3;

  return ret;
}
18914 /* Helper function for rs6000_secondary_reload to determine whether the memory
18915 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18916 needs reloading. Return negative if the memory is not handled by the memory
18917 helper functions and to try a different reload method, 0 if no additional
18918 instructions are need, and positive to give the extra cost for the
18922 rs6000_secondary_reload_memory (rtx addr
,
18923 enum reg_class rclass
,
18926 int extra_cost
= 0;
18927 rtx reg
, and_arg
, plus_arg0
, plus_arg1
;
18928 addr_mask_type addr_mask
;
18929 const char *type
= NULL
;
18930 const char *fail_msg
= NULL
;
18932 if (GPR_REG_CLASS_P (rclass
))
18933 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_GPR
];
18935 else if (rclass
== FLOAT_REGS
)
18936 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
];
18938 else if (rclass
== ALTIVEC_REGS
)
18939 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
];
18941 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18942 else if (rclass
== VSX_REGS
)
18943 addr_mask
= (reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
]
18944 & ~RELOAD_REG_AND_M16
);
18946 /* If the register allocator hasn't made up its mind yet on the register
18947 class to use, settle on defaults to use. */
18948 else if (rclass
== NO_REGS
)
18950 addr_mask
= (reg_addr
[mode
].addr_mask
[RELOAD_REG_ANY
]
18951 & ~RELOAD_REG_AND_M16
);
18953 if ((addr_mask
& RELOAD_REG_MULTIPLE
) != 0)
18954 addr_mask
&= ~(RELOAD_REG_INDEXED
18955 | RELOAD_REG_PRE_INCDEC
18956 | RELOAD_REG_PRE_MODIFY
);
18962 /* If the register isn't valid in this register class, just return now. */
18963 if ((addr_mask
& RELOAD_REG_VALID
) == 0)
18965 if (TARGET_DEBUG_ADDR
)
18968 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18969 "not valid in class\n",
18970 GET_MODE_NAME (mode
), reg_class_names
[rclass
]);
18977 switch (GET_CODE (addr
))
    /* Does the register class support auto update forms for this mode?  We
       don't need a scratch register, since the powerpc only supports
       PRE_INC, PRE_DEC, and PRE_MODIFY.  */
18984 reg
= XEXP (addr
, 0);
18985 if (!base_reg_operand (addr
, GET_MODE (reg
)))
18987 fail_msg
= "no base register #1";
18991 else if ((addr_mask
& RELOAD_REG_PRE_INCDEC
) == 0)
18999 reg
= XEXP (addr
, 0);
19000 plus_arg1
= XEXP (addr
, 1);
19001 if (!base_reg_operand (reg
, GET_MODE (reg
))
19002 || GET_CODE (plus_arg1
) != PLUS
19003 || !rtx_equal_p (reg
, XEXP (plus_arg1
, 0)))
19005 fail_msg
= "bad PRE_MODIFY";
19009 else if ((addr_mask
& RELOAD_REG_PRE_MODIFY
) == 0)
19016 /* Do we need to simulate AND -16 to clear the bottom address bits used
19017 in VMX load/stores? Only allow the AND for vector sizes. */
19019 and_arg
= XEXP (addr
, 0);
19020 if (GET_MODE_SIZE (mode
) != 16
19021 || GET_CODE (XEXP (addr
, 1)) != CONST_INT
19022 || INTVAL (XEXP (addr
, 1)) != -16)
19024 fail_msg
= "bad Altivec AND #1";
19028 if (rclass
!= ALTIVEC_REGS
)
19030 if (legitimate_indirect_address_p (and_arg
, false))
19033 else if (legitimate_indexed_address_p (and_arg
, false))
19038 fail_msg
= "bad Altivec AND #2";
19046 /* If this is an indirect address, make sure it is a base register. */
19049 if (!legitimate_indirect_address_p (addr
, false))
19056 /* If this is an indexed address, make sure the register class can handle
19057 indexed addresses for this mode. */
19059 plus_arg0
= XEXP (addr
, 0);
19060 plus_arg1
= XEXP (addr
, 1);
19062 /* (plus (plus (reg) (constant)) (constant)) is generated during
19063 push_reload processing, so handle it now. */
19064 if (GET_CODE (plus_arg0
) == PLUS
&& CONST_INT_P (plus_arg1
))
19066 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
19073 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19074 push_reload processing, so handle it now. */
19075 else if (GET_CODE (plus_arg0
) == PLUS
&& REG_P (plus_arg1
))
19077 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
19080 type
= "indexed #2";
19084 else if (!base_reg_operand (plus_arg0
, GET_MODE (plus_arg0
)))
19086 fail_msg
= "no base register #2";
19090 else if (int_reg_operand (plus_arg1
, GET_MODE (plus_arg1
)))
19092 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0
19093 || !legitimate_indexed_address_p (addr
, false))
19100 else if ((addr_mask
& RELOAD_REG_QUAD_OFFSET
) != 0
19101 && CONST_INT_P (plus_arg1
))
19103 if (!quad_address_offset_p (INTVAL (plus_arg1
)))
19106 type
= "vector d-form offset";
19110 /* Make sure the register class can handle offset addresses. */
19111 else if (rs6000_legitimate_offset_address_p (mode
, addr
, false, true))
19113 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
19116 type
= "offset #2";
19122 fail_msg
= "bad PLUS";
19129 /* Quad offsets are restricted and can't handle normal addresses. */
19130 if ((addr_mask
& RELOAD_REG_QUAD_OFFSET
) != 0)
19133 type
= "vector d-form lo_sum";
19136 else if (!legitimate_lo_sum_address_p (mode
, addr
, false))
19138 fail_msg
= "bad LO_SUM";
19142 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
19149 /* Static addresses need to create a TOC entry. */
19153 if ((addr_mask
& RELOAD_REG_QUAD_OFFSET
) != 0)
19156 type
= "vector d-form lo_sum #2";
19162 extra_cost
= rs6000_secondary_reload_toc_costs (addr_mask
);
19166 /* TOC references look like offsetable memory. */
19168 if (TARGET_CMODEL
== CMODEL_SMALL
|| XINT (addr
, 1) != UNSPEC_TOCREL
)
19170 fail_msg
= "bad UNSPEC";
19174 else if ((addr_mask
& RELOAD_REG_QUAD_OFFSET
) != 0)
19177 type
= "vector d-form lo_sum #3";
19180 else if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
19183 type
= "toc reference";
19189 fail_msg
= "bad address";
19194 if (TARGET_DEBUG_ADDR
/* && extra_cost != 0 */)
19196 if (extra_cost
< 0)
19198 "rs6000_secondary_reload_memory error: mode = %s, "
19199 "class = %s, addr_mask = '%s', %s\n",
19200 GET_MODE_NAME (mode
),
19201 reg_class_names
[rclass
],
19202 rs6000_debug_addr_mask (addr_mask
, false),
19203 (fail_msg
!= NULL
) ? fail_msg
: "<bad address>");
19207 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19208 "addr_mask = '%s', extra cost = %d, %s\n",
19209 GET_MODE_NAME (mode
),
19210 reg_class_names
[rclass
],
19211 rs6000_debug_addr_mask (addr_mask
, false),
19213 (type
) ? type
: "<none>");
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */
19225 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type
,
19226 enum rs6000_reg_type from_type
,
19229 int size
= GET_MODE_SIZE (mode
);
  /* Add support for the various direct moves available.  In this function, we
     only look at cases where we don't need any extra registers, and one or
     more simple move insns are issued.  At present, small integers are not
     allowed in FPR/VSX registers.  A single-precision binary floating-point
     value is not a simple move, because we need to convert it to the
     single-precision memory layout.  The 4-byte SDmode can be moved.  TDmode
     values are disallowed since they need special direct move handling, which
     we do not support yet.  */
19238 if (TARGET_DIRECT_MOVE
19239 && ((to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
19240 || (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)))
19242 if (TARGET_POWERPC64
)
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
19248 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19249 if (size
== 16 && TARGET_P9_VECTOR
&& mode
!= TDmode
)
19253 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19254 if (TARGET_P8_VECTOR
)
19256 if (mode
== SImode
)
19259 if (TARGET_P9_VECTOR
&& (mode
== HImode
|| mode
== QImode
))
19263 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19264 if (mode
== SDmode
)
19268 /* Power6+: MFTGPR or MFFGPR. */
19269 else if (TARGET_MFPGPR
&& TARGET_POWERPC64
&& size
== 8
19270 && ((to_type
== GPR_REG_TYPE
&& from_type
== FPR_REG_TYPE
)
19271 || (to_type
== FPR_REG_TYPE
&& from_type
== GPR_REG_TYPE
)))
19274 /* Move to/from SPR. */
19275 else if ((size
== 4 || (TARGET_POWERPC64
&& size
== 8))
19276 && ((to_type
== GPR_REG_TYPE
&& from_type
== SPR_REG_TYPE
)
19277 || (to_type
== SPR_REG_TYPE
&& from_type
== GPR_REG_TYPE
)))
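/* Quick reference distilled from the checks above (informal, not exhaustive):
     - 8-byte GPR <-> VSX moves need TARGET_DIRECT_MOVE and TARGET_POWERPC64
       (ISA 2.07 mtvsrd/mfvsrd).
     - 16-byte GPR <-> VSX moves additionally need TARGET_P9_VECTOR and a mode
       other than TDmode (ISA 3.0 mtvsrdd, or mfvsrd plus mfvsrld).
     - SImode (with TARGET_P8_VECTOR) and SDmode use the 32-bit forms
       mtvsrwz/mfvsrwz; HImode and QImode additionally need TARGET_P9_VECTOR.
     - 8-byte GPR <-> FPR moves can also use mftgpr/mffgpr when TARGET_MFPGPR.
     - GPR <-> SPR moves of 4 bytes (or 8 bytes on 64-bit) are simple too.  */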
19283 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19284 special direct moves that involve allocating an extra register, return the
19285 insn code of the helper function if there is such a function or
19286 CODE_FOR_nothing if not. */
19289 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type
,
19290 enum rs6000_reg_type from_type
,
19292 secondary_reload_info
*sri
,
19296 enum insn_code icode
= CODE_FOR_nothing
;
19298 int size
= GET_MODE_SIZE (mode
);
19300 if (TARGET_POWERPC64
&& size
== 16)
      /* Handle moving 128-bit values from GPRs to VSX registers on ISA 2.07
	 (power8, power9) when running in 64-bit mode, using XXPERMDI to glue
	 the two 64-bit values back together.  */
19305 if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)
19307 cost
= 3; /* 2 mtvsrd's, 1 xxpermdi. */
19308 icode
= reg_addr
[mode
].reload_vsx_gpr
;
      /* Handle moving 128-bit values from VSX registers to GPRs on ISA 2.07
	 when running in 64-bit mode, using XXPERMDI to get access to the
	 bottom 64-bit value.  */
19314 else if (to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
19316 cost
= 3; /* 2 mfvsrd's, 1 xxpermdi. */
19317 icode
= reg_addr
[mode
].reload_gpr_vsx
;
19321 else if (TARGET_POWERPC64
&& mode
== SFmode
)
19323 if (to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
19325 cost
= 3; /* xscvdpspn, mfvsrd, and. */
19326 icode
= reg_addr
[mode
].reload_gpr_vsx
;
19329 else if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)
19331 cost
= 2; /* mtvsrz, xscvspdpn. */
19332 icode
= reg_addr
[mode
].reload_vsx_gpr
;
19336 else if (!TARGET_POWERPC64
&& size
== 8)
19338 /* Handle moving 64-bit values from GPRs to floating point registers on
19339 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19340 32-bit values back together. Altivec register classes must be handled
19341 specially since a different instruction is used, and the secondary
19342 reload support requires a single instruction class in the scratch
19343 register constraint. However, right now TFmode is not allowed in
19344 Altivec registers, so the pattern will never match. */
19345 if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
&& !altivec_p
)
19347 cost
= 3; /* 2 mtvsrwz's, 1 fmrgow. */
19348 icode
= reg_addr
[mode
].reload_fpr_gpr
;
19352 if (icode
!= CODE_FOR_nothing
)
19357 sri
->icode
= icode
;
19358 sri
->extra_cost
= cost
;
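/* A minimal sketch (assumed reading of the secondary_reload_info contract):
   when one of the cases above matches, the function fills in, for example,

     sri->icode      = reg_addr[mode].reload_gpr_vsx;
     sri->extra_cost = 3;   -- two moves plus one permute

   and reload later emits that helper pattern with the extra scratch register
   it requests.  */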
/* Return whether a move between two register classes can be done either
   directly (a simple move) or via a pattern that uses a single extra
   temporary (using ISA 2.07's direct move in this case).  */
19370 rs6000_secondary_reload_move (enum rs6000_reg_type to_type
,
19371 enum rs6000_reg_type from_type
,
19373 secondary_reload_info
*sri
,
19376 /* Fall back to load/store reloads if either type is not a register. */
19377 if (to_type
== NO_REG_TYPE
|| from_type
== NO_REG_TYPE
)
19380 /* If we haven't allocated registers yet, assume the move can be done for the
19381 standard register types. */
19382 if ((to_type
== PSEUDO_REG_TYPE
&& from_type
== PSEUDO_REG_TYPE
)
19383 || (to_type
== PSEUDO_REG_TYPE
&& IS_STD_REG_TYPE (from_type
))
19384 || (from_type
== PSEUDO_REG_TYPE
&& IS_STD_REG_TYPE (to_type
)))
19387 /* Moves to the same set of registers is a simple move for non-specialized
19389 if (to_type
== from_type
&& IS_STD_REG_TYPE (to_type
))
19392 /* Check whether a simple move can be done directly. */
19393 if (rs6000_secondary_reload_simple_move (to_type
, from_type
, mode
))
19397 sri
->icode
= CODE_FOR_nothing
;
19398 sri
->extra_cost
= 0;
19403 /* Now check if we can do it in a few steps. */
19404 return rs6000_secondary_reload_direct_move (to_type
, from_type
, mode
, sri
,
19408 /* Inform reload about cases where moving X with a mode MODE to a register in
19409 RCLASS requires an extra scratch or immediate register. Return the class
19410 needed for the immediate register.
19412 For VSX and Altivec, we may need a register to convert sp+offset into
19415 For misaligned 64-bit gpr loads and stores we need a register to
19416 convert an offset address to indirect. */
19419 rs6000_secondary_reload (bool in_p
,
19421 reg_class_t rclass_i
,
19423 secondary_reload_info
*sri
)
19425 enum reg_class rclass
= (enum reg_class
) rclass_i
;
19426 reg_class_t ret
= ALL_REGS
;
19427 enum insn_code icode
;
19428 bool default_p
= false;
19429 bool done_p
= false;
19431 /* Allow subreg of memory before/during reload. */
19432 bool memory_p
= (MEM_P (x
)
19433 || (!reload_completed
&& GET_CODE (x
) == SUBREG
19434 && MEM_P (SUBREG_REG (x
))));
19436 sri
->icode
= CODE_FOR_nothing
;
19437 sri
->t_icode
= CODE_FOR_nothing
;
19438 sri
->extra_cost
= 0;
19440 ? reg_addr
[mode
].reload_load
19441 : reg_addr
[mode
].reload_store
);
19443 if (REG_P (x
) || register_operand (x
, mode
))
19445 enum rs6000_reg_type to_type
= reg_class_to_reg_type
[(int)rclass
];
19446 bool altivec_p
= (rclass
== ALTIVEC_REGS
);
19447 enum rs6000_reg_type from_type
= register_to_reg_type (x
, &altivec_p
);
19450 std::swap (to_type
, from_type
);
19452 /* Can we do a direct move of some sort? */
19453 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
, sri
,
19456 icode
= (enum insn_code
)sri
->icode
;
19463 /* Make sure 0.0 is not reloaded or forced into memory. */
19464 if (x
== CONST0_RTX (mode
) && VSX_REG_CLASS_P (rclass
))
19471 /* If this is a scalar floating point value and we want to load it into the
19472 traditional Altivec registers, do it via a move via a traditional floating
19473 point register, unless we have D-form addressing. Also make sure that
19474 non-zero constants use a FPR. */
19475 if (!done_p
&& reg_addr
[mode
].scalar_in_vmx_p
19476 && !mode_supports_vmx_dform (mode
)
19477 && (rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
)
19478 && (memory_p
|| (GET_CODE (x
) == CONST_DOUBLE
)))
19485 /* Handle reload of load/stores if we have reload helper functions. */
19486 if (!done_p
&& icode
!= CODE_FOR_nothing
&& memory_p
)
19488 int extra_cost
= rs6000_secondary_reload_memory (XEXP (x
, 0), rclass
,
19491 if (extra_cost
>= 0)
19495 if (extra_cost
> 0)
19497 sri
->extra_cost
= extra_cost
;
19498 sri
->icode
= icode
;
19503 /* Handle unaligned loads and stores of integer registers. */
19504 if (!done_p
&& TARGET_POWERPC64
19505 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
19507 && GET_MODE_SIZE (GET_MODE (x
)) >= UNITS_PER_WORD
)
19509 rtx addr
= XEXP (x
, 0);
19510 rtx off
= address_offset (addr
);
19512 if (off
!= NULL_RTX
)
19514 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
19515 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
19517 /* We need a secondary reload when our legitimate_address_p
19518 says the address is good (as otherwise the entire address
19519 will be reloaded), and the offset is not a multiple of
19520 four or we have an address wrap. Address wrap will only
19521 occur for LO_SUMs since legitimate_offset_address_p
19522 rejects addresses for 16-byte mems that will wrap. */
19523 if (GET_CODE (addr
) == LO_SUM
19524 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19525 && ((offset
& 3) != 0
19526 || ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
))
19527 : (offset
+ 0x8000 < 0x10000 - extra
/* legitimate_address_p */
19528 && (offset
& 3) != 0))
19530 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19532 sri
->icode
= ((TARGET_32BIT
) ? CODE_FOR_reload_si_load
19533 : CODE_FOR_reload_di_load
);
19535 sri
->icode
= ((TARGET_32BIT
) ? CODE_FOR_reload_si_store
19536 : CODE_FOR_reload_di_store
);
19537 sri
->extra_cost
= 2;
19548 if (!done_p
&& !TARGET_POWERPC64
19549 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
19551 && GET_MODE_SIZE (GET_MODE (x
)) > UNITS_PER_WORD
)
19553 rtx addr
= XEXP (x
, 0);
19554 rtx off
= address_offset (addr
);
19556 if (off
!= NULL_RTX
)
19558 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
19559 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
19561 /* We need a secondary reload when our legitimate_address_p
19562 says the address is good (as otherwise the entire address
19563 will be reloaded), and we have a wrap.
19565 legitimate_lo_sum_address_p allows LO_SUM addresses to
19566 have any offset so test for wrap in the low 16 bits.
19568 legitimate_offset_address_p checks for the range
19569 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19570 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19571 [0x7ff4,0x7fff] respectively, so test for the
19572 intersection of these ranges, [0x7ffc,0x7fff] and
19573 [0x7ff4,0x7ff7] respectively.
19575 Note that the address we see here may have been
19576 manipulated by legitimize_reload_address. */
19577 if (GET_CODE (addr
) == LO_SUM
19578 ? ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
19579 : offset
- (0x8000 - extra
) < UNITS_PER_WORD
)
19582 sri
->icode
= CODE_FOR_reload_si_load
;
19584 sri
->icode
= CODE_FOR_reload_si_store
;
19585 sri
->extra_cost
= 2;
19600 ret
= default_secondary_reload (in_p
, x
, rclass
, mode
, sri
);
19602 gcc_assert (ret
!= ALL_REGS
);
19604 if (TARGET_DEBUG_ADDR
)
19607 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19609 reg_class_names
[ret
],
19610 in_p
? "true" : "false",
19611 reg_class_names
[rclass
],
19612 GET_MODE_NAME (mode
));
19614 if (reload_completed
)
19615 fputs (", after reload", stderr
);
19618 fputs (", done_p not set", stderr
);
19621 fputs (", default secondary reload", stderr
);
19623 if (sri
->icode
!= CODE_FOR_nothing
)
19624 fprintf (stderr
, ", reload func = %s, extra cost = %d",
19625 insn_data
[sri
->icode
].name
, sri
->extra_cost
);
19627 else if (sri
->extra_cost
> 0)
19628 fprintf (stderr
, ", extra cost = %d", sri
->extra_cost
);
19630 fputs ("\n", stderr
);
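/* Concrete illustration (hypothetical operands) of the misaligned-GPR case
   handled above: a 64-bit load from (mem:DI (plus (reg 9) (const_int 6)))
   has (offset & 3) != 0, so sri->icode is set to CODE_FOR_reload_di_load
   (CODE_FOR_reload_si_load for -m32 -mpowerpc64) and sri->extra_cost to 2,
   letting the reload pattern rebuild the address in a scratch register.  */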
19637 /* Better tracing for rs6000_secondary_reload_inner. */
19640 rs6000_secondary_reload_trace (int line
, rtx reg
, rtx mem
, rtx scratch
,
19645 gcc_assert (reg
!= NULL_RTX
&& mem
!= NULL_RTX
&& scratch
!= NULL_RTX
);
19647 fprintf (stderr
, "rs6000_secondary_reload_inner:%d, type = %s\n", line
,
19648 store_p
? "store" : "load");
19651 set
= gen_rtx_SET (mem
, reg
);
19653 set
= gen_rtx_SET (reg
, mem
);
19655 clobber
= gen_rtx_CLOBBER (VOIDmode
, scratch
);
19656 debug_rtx (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set
, clobber
)));
19659 static void rs6000_secondary_reload_fail (int, rtx
, rtx
, rtx
, bool)
19660 ATTRIBUTE_NORETURN
;
19663 rs6000_secondary_reload_fail (int line
, rtx reg
, rtx mem
, rtx scratch
,
19666 rs6000_secondary_reload_trace (line
, reg
, mem
, scratch
, store_p
);
19667 gcc_unreachable ();
19670 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19671 reload helper functions. These were identified in
19672 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19673 reload, it calls the insns:
19674 reload_<RELOAD:mode>_<P:mptrsize>_store
19675 reload_<RELOAD:mode>_<P:mptrsize>_load
19677 which in turn calls this function, to do whatever is necessary to create
19678 valid addresses. */
19681 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
19683 int regno
= true_regnum (reg
);
19684 machine_mode mode
= GET_MODE (reg
);
19685 addr_mask_type addr_mask
;
19688 rtx op_reg
, op0
, op1
;
19693 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
|| !MEM_P (mem
)
19694 || !base_reg_operand (scratch
, GET_MODE (scratch
)))
19695 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19697 if (IN_RANGE (regno
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
))
19698 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_GPR
];
19700 else if (IN_RANGE (regno
, FIRST_FPR_REGNO
, LAST_FPR_REGNO
))
19701 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
];
19703 else if (IN_RANGE (regno
, FIRST_ALTIVEC_REGNO
, LAST_ALTIVEC_REGNO
))
19704 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
];
19707 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19709 /* Make sure the mode is valid in this register class. */
19710 if ((addr_mask
& RELOAD_REG_VALID
) == 0)
19711 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19713 if (TARGET_DEBUG_ADDR
)
19714 rs6000_secondary_reload_trace (__LINE__
, reg
, mem
, scratch
, store_p
);
19716 new_addr
= addr
= XEXP (mem
, 0);
19717 switch (GET_CODE (addr
))
19719 /* Does the register class support auto update forms for this mode? If
19720 not, do the update now. We don't need a scratch register, since the
19721 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19724 op_reg
= XEXP (addr
, 0);
19725 if (!base_reg_operand (op_reg
, Pmode
))
19726 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19728 if ((addr_mask
& RELOAD_REG_PRE_INCDEC
) == 0)
19730 int delta
= GET_MODE_SIZE (mode
);
19731 if (GET_CODE (addr
) == PRE_DEC
)
19733 emit_insn (gen_add2_insn (op_reg
, GEN_INT (delta
)));
19739 op0
= XEXP (addr
, 0);
19740 op1
= XEXP (addr
, 1);
19741 if (!base_reg_operand (op0
, Pmode
)
19742 || GET_CODE (op1
) != PLUS
19743 || !rtx_equal_p (op0
, XEXP (op1
, 0)))
19744 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19746 if ((addr_mask
& RELOAD_REG_PRE_MODIFY
) == 0)
19748 emit_insn (gen_rtx_SET (op0
, op1
));
19753 /* Do we need to simulate AND -16 to clear the bottom address bits used
19754 in VMX load/stores? */
19756 op0
= XEXP (addr
, 0);
19757 op1
= XEXP (addr
, 1);
19758 if ((addr_mask
& RELOAD_REG_AND_M16
) == 0)
19760 if (REG_P (op0
) || GET_CODE (op0
) == SUBREG
)
19763 else if (GET_CODE (op1
) == PLUS
)
19765 emit_insn (gen_rtx_SET (scratch
, op1
));
19770 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19772 and_op
= gen_rtx_AND (GET_MODE (scratch
), op_reg
, op1
);
19773 cc_clobber
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (CCmode
));
19774 rv
= gen_rtvec (2, gen_rtx_SET (scratch
, and_op
), cc_clobber
);
19775 emit_insn (gen_rtx_PARALLEL (VOIDmode
, rv
));
19776 new_addr
= scratch
;
19780 /* If this is an indirect address, make sure it is a base register. */
19783 if (!base_reg_operand (addr
, GET_MODE (addr
)))
19785 emit_insn (gen_rtx_SET (scratch
, addr
));
19786 new_addr
= scratch
;
19790 /* If this is an indexed address, make sure the register class can handle
19791 indexed addresses for this mode. */
19793 op0
= XEXP (addr
, 0);
19794 op1
= XEXP (addr
, 1);
19795 if (!base_reg_operand (op0
, Pmode
))
19796 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19798 else if (int_reg_operand (op1
, Pmode
))
19800 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
19802 emit_insn (gen_rtx_SET (scratch
, addr
));
19803 new_addr
= scratch
;
19807 else if (mode_supports_dq_form (mode
) && CONST_INT_P (op1
))
19809 if (((addr_mask
& RELOAD_REG_QUAD_OFFSET
) == 0)
19810 || !quad_address_p (addr
, mode
, false))
19812 emit_insn (gen_rtx_SET (scratch
, addr
));
19813 new_addr
= scratch
;
19817 /* Make sure the register class can handle offset addresses. */
19818 else if (rs6000_legitimate_offset_address_p (mode
, addr
, false, true))
19820 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
19822 emit_insn (gen_rtx_SET (scratch
, addr
));
19823 new_addr
= scratch
;
19828 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19833 op0
= XEXP (addr
, 0);
19834 op1
= XEXP (addr
, 1);
19835 if (!base_reg_operand (op0
, Pmode
))
19836 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19838 else if (int_reg_operand (op1
, Pmode
))
19840 if ((addr_mask
& RELOAD_REG_INDEXED
) == 0)
19842 emit_insn (gen_rtx_SET (scratch
, addr
));
19843 new_addr
= scratch
;
19847 /* Quad offsets are restricted and can't handle normal addresses. */
19848 else if (mode_supports_dq_form (mode
))
19850 emit_insn (gen_rtx_SET (scratch
, addr
));
19851 new_addr
= scratch
;
19854 /* Make sure the register class can handle offset addresses. */
19855 else if (legitimate_lo_sum_address_p (mode
, addr
, false))
19857 if ((addr_mask
& RELOAD_REG_OFFSET
) == 0)
19859 emit_insn (gen_rtx_SET (scratch
, addr
));
19860 new_addr
= scratch
;
19865 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19872 rs6000_emit_move (scratch
, addr
, Pmode
);
19873 new_addr
= scratch
;
19877 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
19880 /* Adjust the address if it changed. */
19881 if (addr
!= new_addr
)
19883 mem
= replace_equiv_address_nv (mem
, new_addr
);
19884 if (TARGET_DEBUG_ADDR
)
19885 fprintf (stderr
, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19888 /* Now create the move. */
19890 emit_insn (gen_rtx_SET (mem
, reg
));
19892 emit_insn (gen_rtx_SET (reg
, mem
));
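/* Example walk-through (hypothetical RTL) of the PLUS handling above: for a
   mode that uses DQ-form (quad) offsets, an address such as

     (plus (reg 9) (const_int 6))

   fails quad_address_p because the offset is not a multiple of 16, so the
   whole address is copied into the scratch register with
   emit_insn (gen_rtx_SET (scratch, addr)), the MEM is rewritten via
   replace_equiv_address_nv, and only then is the final load or store
   emitted.  */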
19897 /* Convert reloads involving 64-bit gprs and misaligned offset
19898 addressing, or multiple 32-bit gprs and offsets that are too large,
19899 to use indirect addressing. */
19902 rs6000_secondary_reload_gpr (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
19904 int regno
= true_regnum (reg
);
19905 enum reg_class rclass
;
19907 rtx scratch_or_premodify
= scratch
;
19909 if (TARGET_DEBUG_ADDR
)
19911 fprintf (stderr
, "\nrs6000_secondary_reload_gpr, type = %s\n",
19912 store_p
? "store" : "load");
19913 fprintf (stderr
, "reg:\n");
19915 fprintf (stderr
, "mem:\n");
19917 fprintf (stderr
, "scratch:\n");
19918 debug_rtx (scratch
);
19921 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
19922 gcc_assert (GET_CODE (mem
) == MEM
);
19923 rclass
= REGNO_REG_CLASS (regno
);
19924 gcc_assert (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
);
19925 addr
= XEXP (mem
, 0);
19927 if (GET_CODE (addr
) == PRE_MODIFY
)
19929 gcc_assert (REG_P (XEXP (addr
, 0))
19930 && GET_CODE (XEXP (addr
, 1)) == PLUS
19931 && XEXP (XEXP (addr
, 1), 0) == XEXP (addr
, 0));
19932 scratch_or_premodify
= XEXP (addr
, 0);
19933 addr
= XEXP (addr
, 1);
19935 gcc_assert (GET_CODE (addr
) == PLUS
|| GET_CODE (addr
) == LO_SUM
);
19937 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
19939 mem
= replace_equiv_address_nv (mem
, scratch_or_premodify
);
19941 /* Now create the move. */
19943 emit_insn (gen_rtx_SET (mem
, reg
));
19945 emit_insn (gen_rtx_SET (reg
, mem
));
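/* Sketch of the transformation performed above (hypothetical operands): a
   misaligned 64-bit load such as

     (set (reg:DI 10) (mem:DI (plus (reg:DI 9) (const_int 6))))

   is rewritten so the address is first built in the scratch register and the
   access then goes through it indirectly:

     (set (reg:DI scratch) (plus (reg:DI 9) (const_int 6)))
     (set (reg:DI 10) (mem:DI (reg:DI scratch)))  */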
19950 /* Given an rtx X being reloaded into a reg required to be
19951 in class CLASS, return the class of reg to actually use.
19952 In general this is just CLASS; but on some machines
19953 in some cases it is preferable to use a more restrictive class.
19955 On the RS/6000, we have to return NO_REGS when we want to reload a
19956 floating-point CONST_DOUBLE to force it to be copied to memory.
19958 We also don't want to reload integer values into floating-point
19959 registers if we can at all help it. In fact, this can
19960 cause reload to die, if it tries to generate a reload of CTR
19961 into a FP register and discovers it doesn't have the memory location
19964 ??? Would it be a good idea to have reload do the converse, that is
19965 try to reload floating modes into FP registers if possible?
19968 static enum reg_class
19969 rs6000_preferred_reload_class (rtx x
, enum reg_class rclass
)
19971 machine_mode mode
= GET_MODE (x
);
19972 bool is_constant
= CONSTANT_P (x
);
19974 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19975 reload class for it. */
19976 if ((rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
19977 && (reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
] & RELOAD_REG_VALID
) == 0)
19980 if ((rclass
== FLOAT_REGS
|| rclass
== VSX_REGS
)
19981 && (reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
] & RELOAD_REG_VALID
) == 0)
19984 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19985 the reloading of address expressions using PLUS into floating point
19987 if (TARGET_VSX
&& VSX_REG_CLASS_P (rclass
) && GET_CODE (x
) != PLUS
)
19991 /* Zero is always allowed in all VSX registers. */
19992 if (x
== CONST0_RTX (mode
))
19995 /* If this is a vector constant that can be formed with a few Altivec
19996 instructions, we want altivec registers. */
19997 if (GET_CODE (x
) == CONST_VECTOR
&& easy_vector_constant (x
, mode
))
19998 return ALTIVEC_REGS
;
20000 /* If this is an integer constant that can easily be loaded into
20001 vector registers, allow it. */
20002 if (CONST_INT_P (x
))
20004 HOST_WIDE_INT value
= INTVAL (x
);
20006 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20007 2.06 can generate it in the Altivec registers with
20011 if (TARGET_P8_VECTOR
)
20013 else if (rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
20014 return ALTIVEC_REGS
;
20019 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20020 a sign extend in the Altivec registers. */
20021 if (IN_RANGE (value
, -128, 127) && TARGET_P9_VECTOR
20022 && (rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
))
20023 return ALTIVEC_REGS
;
20026 /* Force constant to memory. */
20030 /* D-form addressing can easily reload the value. */
20031 if (mode_supports_vmx_dform (mode
)
20032 || mode_supports_dq_form (mode
))
20035 /* If this is a scalar floating point value and we don't have D-form
20036 addressing, prefer the traditional floating point registers so that we
20037 can use D-form (register+offset) addressing. */
20038 if (rclass
== VSX_REGS
20039 && (mode
== SFmode
|| GET_MODE_SIZE (mode
) == 8))
20042 /* Prefer the Altivec registers if Altivec is handling the vector
20043 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20045 if (VECTOR_UNIT_ALTIVEC_P (mode
) || VECTOR_MEM_ALTIVEC_P (mode
)
20046 || mode
== V1TImode
)
20047 return ALTIVEC_REGS
;
20052 if (is_constant
|| GET_CODE (x
) == PLUS
)
20054 if (reg_class_subset_p (GENERAL_REGS
, rclass
))
20055 return GENERAL_REGS
;
20056 if (reg_class_subset_p (BASE_REGS
, rclass
))
20061 if (GET_MODE_CLASS (mode
) == MODE_INT
&& rclass
== NON_SPECIAL_REGS
)
20062 return GENERAL_REGS
;
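/* Informal examples of the constant preferences chosen above (my reading of
   this function, not a normative table):
     - CONST0_RTX in a VSX class: keep the requested class; zero can live in
       any VSX register.
     - the constant -1 with TARGET_P8_VECTOR: any VSX class works (xxlorc can
       generate it); with only ISA 2.06 it is steered to ALTIVEC_REGS.
     - constants in [-128, 127] with TARGET_P9_VECTOR: ALTIVEC_REGS, so
       xxspltib plus a sign extend can materialize them.
     - other constants headed for VSX registers are forced to memory.  */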
20067 /* Debug version of rs6000_preferred_reload_class. */
20068 static enum reg_class
20069 rs6000_debug_preferred_reload_class (rtx x
, enum reg_class rclass
)
20071 enum reg_class ret
= rs6000_preferred_reload_class (x
, rclass
);
20074 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20076 reg_class_names
[ret
], reg_class_names
[rclass
],
20077 GET_MODE_NAME (GET_MODE (x
)));
20083 /* If we are copying between FP or AltiVec registers and anything else, we need
20084 a memory location. The exception is when we are targeting ppc64 and the
20085 move to/from fpr to gpr instructions are available. Also, under VSX, you
20086 can copy vector registers from the FP register set to the Altivec register
20087 set and vice versa. */
20090 rs6000_secondary_memory_needed (machine_mode mode
,
20091 reg_class_t from_class
,
20092 reg_class_t to_class
)
20094 enum rs6000_reg_type from_type
, to_type
;
20095 bool altivec_p
= ((from_class
== ALTIVEC_REGS
)
20096 || (to_class
== ALTIVEC_REGS
));
20098 /* If a simple/direct move is available, we don't need secondary memory */
20099 from_type
= reg_class_to_reg_type
[(int)from_class
];
20100 to_type
= reg_class_to_reg_type
[(int)to_class
];
20102 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
,
20103 (secondary_reload_info
*)0, altivec_p
))
20106 /* If we have a floating point or vector register class, we need to use
20107 memory to transfer the data. */
20108 if (IS_FP_VECT_REG_TYPE (from_type
) || IS_FP_VECT_REG_TYPE (to_type
))
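/* Usage note (visible in the code above): rs6000_secondary_reload_move can be
   queried outside of a reload context by passing a null secondary_reload_info
   pointer, e.g.

     if (rs6000_secondary_reload_move (to_type, from_type, mode,
				       (secondary_reload_info *) 0, altivec_p))
       return false;   -- a direct move exists, no stack slot is needed

   so a DFmode copy between FLOAT_REGS and GENERAL_REGS only needs secondary
   memory when no direct-move path (e.g. ISA 2.07 mtvsrd/mfvsrd in 64-bit
   mode) is available.  */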
20114 /* Debug version of rs6000_secondary_memory_needed. */
20116 rs6000_debug_secondary_memory_needed (machine_mode mode
,
20117 reg_class_t from_class
,
20118 reg_class_t to_class
)
20120 bool ret
= rs6000_secondary_memory_needed (mode
, from_class
, to_class
);
20123 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20124 "to_class = %s, mode = %s\n",
20125 ret
? "true" : "false",
20126 reg_class_names
[from_class
],
20127 reg_class_names
[to_class
],
20128 GET_MODE_NAME (mode
));
20133 /* Return the register class of a scratch register needed to copy IN into
20134 or out of a register in RCLASS in MODE. If it can be done directly,
20135 NO_REGS is returned. */
20137 static enum reg_class
20138 rs6000_secondary_reload_class (enum reg_class rclass
, machine_mode mode
,
20143 if (TARGET_ELF
|| (DEFAULT_ABI
== ABI_DARWIN
20145 && MACHOPIC_INDIRECT
20149 /* We cannot copy a symbolic operand directly into anything
20150 other than BASE_REGS for TARGET_ELF. So indicate that a
20151 register from BASE_REGS is needed as an intermediate
20154 On Darwin, pic addresses require a load from memory, which
20155 needs a base register. */
20156 if (rclass
!= BASE_REGS
20157 && (GET_CODE (in
) == SYMBOL_REF
20158 || GET_CODE (in
) == HIGH
20159 || GET_CODE (in
) == LABEL_REF
20160 || GET_CODE (in
) == CONST
))
20164 if (GET_CODE (in
) == REG
)
20166 regno
= REGNO (in
);
20167 if (regno
>= FIRST_PSEUDO_REGISTER
)
20169 regno
= true_regnum (in
);
20170 if (regno
>= FIRST_PSEUDO_REGISTER
)
20174 else if (GET_CODE (in
) == SUBREG
)
20176 regno
= true_regnum (in
);
20177 if (regno
>= FIRST_PSEUDO_REGISTER
)
20183 /* If we have VSX register moves, prefer moving scalar values between
20184 Altivec registers and GPR by going via an FPR (and then via memory)
20185 instead of reloading the secondary memory address for Altivec moves. */
20187 && GET_MODE_SIZE (mode
) < 16
20188 && !mode_supports_vmx_dform (mode
)
20189 && (((rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
)
20190 && (regno
>= 0 && ALTIVEC_REGNO_P (regno
)))
20191 || ((rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
)
20192 && (regno
>= 0 && INT_REGNO_P (regno
)))))
20195 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20197 if (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
20198 || (regno
>= 0 && INT_REGNO_P (regno
)))
20201 /* Constants, memory, and VSX registers can go into VSX registers (both the
20202 traditional floating point and the altivec registers). */
20203 if (rclass
== VSX_REGS
20204 && (regno
== -1 || VSX_REGNO_P (regno
)))
20207 /* Constants, memory, and FP registers can go into FP registers. */
20208 if ((regno
== -1 || FP_REGNO_P (regno
))
20209 && (rclass
== FLOAT_REGS
|| rclass
== NON_SPECIAL_REGS
))
20210 return (mode
!= SDmode
|| lra_in_progress
) ? NO_REGS
: GENERAL_REGS
;
20212 /* Memory, and AltiVec registers can go into AltiVec registers. */
20213 if ((regno
== -1 || ALTIVEC_REGNO_P (regno
))
20214 && rclass
== ALTIVEC_REGS
)
20217 /* We can copy among the CR registers. */
20218 if ((rclass
== CR_REGS
|| rclass
== CR0_REGS
)
20219 && regno
>= 0 && CR_REGNO_P (regno
))
20222 /* Otherwise, we need GENERAL_REGS. */
20223 return GENERAL_REGS
;
20226 /* Debug version of rs6000_secondary_reload_class. */
20227 static enum reg_class
20228 rs6000_debug_secondary_reload_class (enum reg_class rclass
,
20229 machine_mode mode
, rtx in
)
20231 enum reg_class ret
= rs6000_secondary_reload_class (rclass
, mode
, in
);
20233 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20234 "mode = %s, input rtx:\n",
20235 reg_class_names
[ret
], reg_class_names
[rclass
],
20236 GET_MODE_NAME (mode
));
20242 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20245 rs6000_can_change_mode_class (machine_mode from
,
20247 reg_class_t rclass
)
20249 unsigned from_size
= GET_MODE_SIZE (from
);
20250 unsigned to_size
= GET_MODE_SIZE (to
);
20252 if (from_size
!= to_size
)
20254 enum reg_class xclass
= (TARGET_VSX
) ? VSX_REGS
: FLOAT_REGS
;
20256 if (reg_classes_intersect_p (xclass
, rclass
))
20258 unsigned to_nregs
= hard_regno_nregs (FIRST_FPR_REGNO
, to
);
20259 unsigned from_nregs
= hard_regno_nregs (FIRST_FPR_REGNO
, from
);
20260 bool to_float128_vector_p
= FLOAT128_VECTOR_P (to
);
20261 bool from_float128_vector_p
= FLOAT128_VECTOR_P (from
);
	  /* Don't allow 64-bit types to overlap with 128-bit types that take a
	     single register under VSX, because the scalar part of the register
	     is in the upper 64 bits, and not the lower 64 bits.  Types like
	     TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
	     IEEE floating point can't overlap, and neither can small
	     values.  */
20270 if (to_float128_vector_p
&& from_float128_vector_p
)
20273 else if (to_float128_vector_p
|| from_float128_vector_p
)
20276 /* TDmode in floating-mode registers must always go into a register
20277 pair with the most significant word in the even-numbered register
20278 to match ISA requirements. In little-endian mode, this does not
20279 match subreg numbering, so we cannot allow subregs. */
20280 if (!BYTES_BIG_ENDIAN
&& (to
== TDmode
|| from
== TDmode
))
20283 if (from_size
< 8 || to_size
< 8)
20286 if (from_size
== 8 && (8 * to_nregs
) != to_size
)
20289 if (to_size
== 8 && (8 * from_nregs
) != from_size
)
20298 /* Since the VSX register set includes traditional floating point registers
20299 and altivec registers, just check for the size being different instead of
20300 trying to check whether the modes are vector modes. Otherwise it won't
20301 allow say DF and DI to change classes. For types like TFmode and TDmode
20302 that take 2 64-bit registers, rather than a single 128-bit register, don't
20303 allow subregs of those types to other 128 bit types. */
20304 if (TARGET_VSX
&& VSX_REG_CLASS_P (rclass
))
20306 unsigned num_regs
= (from_size
+ 15) / 16;
20307 if (hard_regno_nregs (FIRST_FPR_REGNO
, to
) > num_regs
20308 || hard_regno_nregs (FIRST_FPR_REGNO
, from
) > num_regs
)
20311 return (from_size
== 8 || from_size
== 16);
20314 if (TARGET_ALTIVEC
&& rclass
== ALTIVEC_REGS
20315 && (ALTIVEC_VECTOR_MODE (from
) + ALTIVEC_VECTOR_MODE (to
)) == 1)
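/* Illustrative consequences of the checks above (informal reading): a DImode
   subreg of a KFmode value in a VSX register is rejected, because one mode is
   an IEEE 128-bit scalar kept in a vector register and the other is not; a
   DImode subreg of DFmode is fine, since both fit a single 64-bit scalar; and
   within the VSX classes, size-changing subregs are only allowed when the
   source is an 8- or 16-byte mode.  */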
20321 /* Debug version of rs6000_can_change_mode_class. */
20323 rs6000_debug_can_change_mode_class (machine_mode from
,
20325 reg_class_t rclass
)
20327 bool ret
= rs6000_can_change_mode_class (from
, to
, rclass
);
20330 "rs6000_can_change_mode_class, return %s, from = %s, "
20331 "to = %s, rclass = %s\n",
20332 ret
? "true" : "false",
20333 GET_MODE_NAME (from
), GET_MODE_NAME (to
),
20334 reg_class_names
[rclass
]);
20339 /* Return a string to do a move operation of 128 bits of data. */
20342 rs6000_output_move_128bit (rtx operands
[])
20344 rtx dest
= operands
[0];
20345 rtx src
= operands
[1];
20346 machine_mode mode
= GET_MODE (dest
);
20349 bool dest_gpr_p
, dest_fp_p
, dest_vmx_p
, dest_vsx_p
;
20350 bool src_gpr_p
, src_fp_p
, src_vmx_p
, src_vsx_p
;
20354 dest_regno
= REGNO (dest
);
20355 dest_gpr_p
= INT_REGNO_P (dest_regno
);
20356 dest_fp_p
= FP_REGNO_P (dest_regno
);
20357 dest_vmx_p
= ALTIVEC_REGNO_P (dest_regno
);
20358 dest_vsx_p
= dest_fp_p
| dest_vmx_p
;
20363 dest_gpr_p
= dest_fp_p
= dest_vmx_p
= dest_vsx_p
= false;
20368 src_regno
= REGNO (src
);
20369 src_gpr_p
= INT_REGNO_P (src_regno
);
20370 src_fp_p
= FP_REGNO_P (src_regno
);
20371 src_vmx_p
= ALTIVEC_REGNO_P (src_regno
);
20372 src_vsx_p
= src_fp_p
| src_vmx_p
;
20377 src_gpr_p
= src_fp_p
= src_vmx_p
= src_vsx_p
= false;
20380 /* Register moves. */
20381 if (dest_regno
>= 0 && src_regno
>= 0)
20388 if (TARGET_DIRECT_MOVE_128
&& src_vsx_p
)
20389 return (WORDS_BIG_ENDIAN
20390 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20391 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20393 else if (TARGET_VSX
&& TARGET_DIRECT_MOVE
&& src_vsx_p
)
20397 else if (TARGET_VSX
&& dest_vsx_p
)
20400 return "xxlor %x0,%x1,%x1";
20402 else if (TARGET_DIRECT_MOVE_128
&& src_gpr_p
)
20403 return (WORDS_BIG_ENDIAN
20404 ? "mtvsrdd %x0,%1,%L1"
20405 : "mtvsrdd %x0,%L1,%1");
20407 else if (TARGET_DIRECT_MOVE
&& src_gpr_p
)
20411 else if (TARGET_ALTIVEC
&& dest_vmx_p
&& src_vmx_p
)
20412 return "vor %0,%1,%1";
20414 else if (dest_fp_p
&& src_fp_p
)
20419 else if (dest_regno
>= 0 && MEM_P (src
))
20423 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20429 else if (TARGET_ALTIVEC
&& dest_vmx_p
20430 && altivec_indexed_or_indirect_operand (src
, mode
))
20431 return "lvx %0,%y1";
20433 else if (TARGET_VSX
&& dest_vsx_p
)
20435 if (mode_supports_dq_form (mode
)
20436 && quad_address_p (XEXP (src
, 0), mode
, true))
20437 return "lxv %x0,%1";
20439 else if (TARGET_P9_VECTOR
)
20440 return "lxvx %x0,%y1";
20442 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20443 return "lxvw4x %x0,%y1";
20446 return "lxvd2x %x0,%y1";
20449 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
20450 return "lvx %0,%y1";
20452 else if (dest_fp_p
)
20457 else if (src_regno
>= 0 && MEM_P (dest
))
20461 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20462 return "stq %1,%0";
20467 else if (TARGET_ALTIVEC
&& src_vmx_p
20468 && altivec_indexed_or_indirect_operand (dest
, mode
))
20469 return "stvx %1,%y0";
20471 else if (TARGET_VSX
&& src_vsx_p
)
20473 if (mode_supports_dq_form (mode
)
20474 && quad_address_p (XEXP (dest
, 0), mode
, true))
20475 return "stxv %x1,%0";
20477 else if (TARGET_P9_VECTOR
)
20478 return "stxvx %x1,%y0";
20480 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20481 return "stxvw4x %x1,%y0";
20484 return "stxvd2x %x1,%y0";
20487 else if (TARGET_ALTIVEC
&& src_vmx_p
)
20488 return "stvx %1,%y0";
20495 else if (dest_regno
>= 0
20496 && (GET_CODE (src
) == CONST_INT
20497 || GET_CODE (src
) == CONST_WIDE_INT
20498 || GET_CODE (src
) == CONST_DOUBLE
20499 || GET_CODE (src
) == CONST_VECTOR
))
20504 else if ((dest_vmx_p
&& TARGET_ALTIVEC
)
20505 || (dest_vsx_p
&& TARGET_VSX
))
20506 return output_vec_const_move (operands
);
20509 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest
, src
));
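/* Examples of the templates returned above (assuming the listed ISA support):
     - VSX register to VSX register:	       "xxlor %x0,%x1,%x1"
     - Altivec register to Altivec register:   "vor %0,%1,%1"
     - GPR pair to VSX register (ISA 3.0, BE): "mtvsrdd %x0,%1,%L1"
     - VSX load with a valid quad offset:      "lxv %x0,%1"
     - VSX load without ISA 3.0:	       "lxvd2x %x0,%y1" (or "lxvw4x"
       for the 4-byte-element vector modes).  */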
20512 /* Validate a 128-bit move. */
20514 rs6000_move_128bit_ok_p (rtx operands
[])
20516 machine_mode mode
= GET_MODE (operands
[0]);
20517 return (gpc_reg_operand (operands
[0], mode
)
20518 || gpc_reg_operand (operands
[1], mode
));
20521 /* Return true if a 128-bit move needs to be split. */
20523 rs6000_split_128bit_ok_p (rtx operands
[])
20525 if (!reload_completed
)
20528 if (!gpr_or_gpr_p (operands
[0], operands
[1]))
20531 if (quad_load_store_p (operands
[0], operands
[1]))
20538 /* Given a comparison operation, return the bit number in CCR to test. We
20539 know this is a valid comparison.
20541 SCC_P is 1 if this is for an scc. That means that %D will have been
20542 used instead of %C, so the bits will be in different places.
20544 Return -1 if OP isn't a valid comparison for some reason. */
20547 ccr_bit (rtx op
, int scc_p
)
20549 enum rtx_code code
= GET_CODE (op
);
20550 machine_mode cc_mode
;
20555 if (!COMPARISON_P (op
))
20558 reg
= XEXP (op
, 0);
20560 gcc_assert (GET_CODE (reg
) == REG
&& CR_REGNO_P (REGNO (reg
)));
20562 cc_mode
= GET_MODE (reg
);
20563 cc_regnum
= REGNO (reg
);
20564 base_bit
= 4 * (cc_regnum
- CR0_REGNO
);
20566 validate_condition_mode (code
, cc_mode
);
20568 /* When generating a sCOND operation, only positive conditions are
20571 || code
== EQ
|| code
== GT
|| code
== LT
|| code
== UNORDERED
20572 || code
== GTU
|| code
== LTU
);
20577 return scc_p
? base_bit
+ 3 : base_bit
+ 2;
20579 return base_bit
+ 2;
20580 case GT
: case GTU
: case UNLE
:
20581 return base_bit
+ 1;
20582 case LT
: case LTU
: case UNGE
:
20584 case ORDERED
: case UNORDERED
:
20585 return base_bit
+ 3;
20588 /* If scc, we will have done a cror to put the bit in the
20589 unordered position. So test that bit. For integer, this is ! LT
20590 unless this is an scc insn. */
20591 return scc_p
? base_bit
+ 3 : base_bit
;
20594 return scc_p
? base_bit
+ 3 : base_bit
+ 1;
20597 gcc_unreachable ();
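/* Worked example for ccr_bit (my reading of the code above): a GT comparison
   kept in CR field 2 gives base_bit = 4 * (cc_regnum - CR0_REGNO) = 8, and
   the function returns base_bit + 1 = 9, the GT bit of that field; for scc
   uses the interesting bit has been cror'ed into the unordered position,
   which is why several cases return base_bit + 3 when SCC_P is set.  */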
20601 /* Return the GOT register. */
20604 rs6000_got_register (rtx value ATTRIBUTE_UNUSED
)
20606 /* The second flow pass currently (June 1999) can't update
20607 regs_ever_live without disturbing other parts of the compiler, so
20608 update it here to make the prolog/epilogue code happy. */
20609 if (!can_create_pseudo_p ()
20610 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))
20611 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM
, true);
20613 crtl
->uses_pic_offset_table
= 1;
20615 return pic_offset_table_rtx
;
20618 static rs6000_stack_t stack_info
;
20620 /* Function to init struct machine_function.
20621 This will be called, via a pointer variable,
20622 from push_function_context. */
20624 static struct machine_function
*
20625 rs6000_init_machine_status (void)
20627 stack_info
.reload_completed
= 0;
20628 return ggc_cleared_alloc
<machine_function
> ();
20631 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20633 /* Write out a function code label. */
20636 rs6000_output_function_entry (FILE *file
, const char *fname
)
20638 if (fname
[0] != '.')
20640 switch (DEFAULT_ABI
)
20643 gcc_unreachable ();
20649 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
20659 RS6000_OUTPUT_BASENAME (file
, fname
);
20662 /* Print an operand. Recognize special options, documented below. */
20665 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20666 only introduced by the linker, when applying the sda21
20668 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20669 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20671 #define SMALL_DATA_RELOC "sda21"
20672 #define SMALL_DATA_REG 0
20676 print_operand (FILE *file
, rtx x
, int code
)
20679 unsigned HOST_WIDE_INT uval
;
20683 /* %a is output_address. */
20685 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20689 /* Like 'J' but get to the GT bit only. */
20692 output_operand_lossage ("invalid %%D value");
20696 /* Bit 1 is GT bit. */
20697 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
20699 /* Add one for shift count in rlinm for scc. */
20700 fprintf (file
, "%d", i
+ 1);
20704 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20707 output_operand_lossage ("invalid %%e value");
20712 if ((uval
& 0xffff) == 0 && uval
!= 0)
20717 /* X is a CR register. Print the number of the EQ bit of the CR */
20718 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
20719 output_operand_lossage ("invalid %%E value");
20721 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
20725 /* X is a CR register. Print the shift count needed to move it
20726 to the high-order four bits. */
20727 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
20728 output_operand_lossage ("invalid %%f value");
20730 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
20734 /* Similar, but print the count for the rotate in the opposite
20736 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
20737 output_operand_lossage ("invalid %%F value");
20739 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
20743 /* X is a constant integer. If it is negative, print "m",
20744 otherwise print "z". This is to make an aze or ame insn. */
20745 if (GET_CODE (x
) != CONST_INT
)
20746 output_operand_lossage ("invalid %%G value");
20747 else if (INTVAL (x
) >= 0)
20754 /* If constant, output low-order five bits. Otherwise, write
20757 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
20759 print_operand (file
, x
, 0);
20763 /* If constant, output low-order six bits. Otherwise, write
20766 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
20768 print_operand (file
, x
, 0);
20772 /* Print `i' if this is a constant, else nothing. */
20778 /* Write the bit number in CCR for jump. */
20779 i
= ccr_bit (x
, 0);
20781 output_operand_lossage ("invalid %%j code");
20783 fprintf (file
, "%d", i
);
20787 /* Similar, but add one for shift count in rlinm for scc and pass
20788 scc flag to `ccr_bit'. */
20789 i
= ccr_bit (x
, 1);
20791 output_operand_lossage ("invalid %%J code");
20793 /* If we want bit 31, write a shift count of zero, not 32. */
20794 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
20798 /* X must be a constant. Write the 1's complement of the
20801 output_operand_lossage ("invalid %%k value");
20803 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
20807 /* X must be a symbolic constant on ELF. Write an
20808 expression suitable for an 'addi' that adds in the low 16
20809 bits of the MEM. */
20810 if (GET_CODE (x
) == CONST
)
20812 if (GET_CODE (XEXP (x
, 0)) != PLUS
20813 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
20814 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
20815 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
20816 output_operand_lossage ("invalid %%K value");
20818 print_operand_address (file
, x
);
20819 fputs ("@l", file
);
20822 /* %l is output_asm_label. */
20825 /* Write second word of DImode or DFmode reference. Works on register
20826 or non-indexed memory only. */
20828 fputs (reg_names
[REGNO (x
) + 1], file
);
20829 else if (MEM_P (x
))
20831 machine_mode mode
= GET_MODE (x
);
20832 /* Handle possible auto-increment. Since it is pre-increment and
20833 we have already done it, we can just use an offset of word. */
20834 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
20835 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
20836 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
20838 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
20839 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
20842 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
,
20846 if (small_data_operand (x
, GET_MODE (x
)))
20847 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
20848 reg_names
[SMALL_DATA_REG
]);
20852 case 'N': /* Unused */
20853 /* Write the number of elements in the vector times 4. */
20854 if (GET_CODE (x
) != PARALLEL
)
20855 output_operand_lossage ("invalid %%N value");
20857 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
20860 case 'O': /* Unused */
20861 /* Similar, but subtract 1 first. */
20862 if (GET_CODE (x
) != PARALLEL
)
20863 output_operand_lossage ("invalid %%O value");
20865 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
20869 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20872 || (i
= exact_log2 (INTVAL (x
))) < 0)
20873 output_operand_lossage ("invalid %%p value");
20875 fprintf (file
, "%d", i
);
20879 /* The operand must be an indirect memory reference. The result
20880 is the register name. */
20881 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
20882 || REGNO (XEXP (x
, 0)) >= 32)
20883 output_operand_lossage ("invalid %%P value");
20885 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
20889 /* This outputs the logical code corresponding to a boolean
20890 expression. The expression may have one or both operands
20891 negated (if one, only the first one). For condition register
20892 logical operations, it will also treat the negated
20893 CR codes as NOTs, but not handle NOTs of them. */
20895 const char *const *t
= 0;
20897 enum rtx_code code
= GET_CODE (x
);
20898 static const char * const tbl
[3][3] = {
20899 { "and", "andc", "nor" },
20900 { "or", "orc", "nand" },
20901 { "xor", "eqv", "xor" } };
20905 else if (code
== IOR
)
20907 else if (code
== XOR
)
20910 output_operand_lossage ("invalid %%q value");
20912 if (GET_CODE (XEXP (x
, 0)) != NOT
)
20916 if (GET_CODE (XEXP (x
, 1)) == NOT
)
20927 if (! TARGET_MFCRF
)
20933 /* X is a CR register. Print the mask for `mtcrf'. */
20934 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
20935 output_operand_lossage ("invalid %%R value");
20937 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
20941 /* Low 5 bits of 32 - value */
20943 output_operand_lossage ("invalid %%s value");
20945 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
20949 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20950 if (!REG_P (x
) || GET_MODE (x
) != CCmode
)
20952 output_operand_lossage ("invalid %%t value");
20956 /* Bit 3 is OV bit. */
20957 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
20959 /* If we want bit 31, write a shift count of zero, not 32. */
20960 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
20964 /* Print the symbolic name of a branch target register. */
20965 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_PLTSEQ
)
20966 x
= XVECEXP (x
, 0, 0);
20967 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
20968 && REGNO (x
) != CTR_REGNO
))
20969 output_operand_lossage ("invalid %%T value");
20970 else if (REGNO (x
) == LR_REGNO
)
20971 fputs ("lr", file
);
20973 fputs ("ctr", file
);
20977 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20978 for use in unsigned operand. */
20981 output_operand_lossage ("invalid %%u value");
20986 if ((uval
& 0xffff) == 0)
20989 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, uval
& 0xffff);
20993 /* High-order 16 bits of constant for use in signed operand. */
20995 output_operand_lossage ("invalid %%v value");
20997 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
20998 (INTVAL (x
) >> 16) & 0xffff);
21002 /* Print `u' if this has an auto-increment or auto-decrement. */
21004 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
21005 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
21006 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
21011 /* Print the trap code for this operand. */
21012 switch (GET_CODE (x
))
21015 fputs ("eq", file
); /* 4 */
21018 fputs ("ne", file
); /* 24 */
21021 fputs ("lt", file
); /* 16 */
21024 fputs ("le", file
); /* 20 */
21027 fputs ("gt", file
); /* 8 */
21030 fputs ("ge", file
); /* 12 */
21033 fputs ("llt", file
); /* 2 */
21036 fputs ("lle", file
); /* 6 */
21039 fputs ("lgt", file
); /* 1 */
21042 fputs ("lge", file
); /* 5 */
21045 output_operand_lossage ("invalid %%V value");
21050 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21053 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
21054 ((INTVAL (x
) & 0xffff) ^ 0x8000) - 0x8000);
21056 print_operand (file
, x
, 0);
21060 /* X is a FPR or Altivec register used in a VSX context. */
21061 if (GET_CODE (x
) != REG
|| !VSX_REGNO_P (REGNO (x
)))
21062 output_operand_lossage ("invalid %%x value");
21065 int reg
= REGNO (x
);
21066 int vsx_reg
= (FP_REGNO_P (reg
)
21068 : reg
- FIRST_ALTIVEC_REGNO
+ 32);
21070 #ifdef TARGET_REGNAMES
21071 if (TARGET_REGNAMES
)
21072 fprintf (file
, "%%vs%d", vsx_reg
);
21075 fprintf (file
, "%d", vsx_reg
);
21081 && (legitimate_indexed_address_p (XEXP (x
, 0), 0)
21082 || (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
21083 && legitimate_indexed_address_p (XEXP (XEXP (x
, 0), 1), 0))))
21088 /* Like 'L', for third word of TImode/PTImode */
21090 fputs (reg_names
[REGNO (x
) + 2], file
);
21091 else if (MEM_P (x
))
21093 machine_mode mode
= GET_MODE (x
);
21094 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21095 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21096 output_address (mode
, plus_constant (Pmode
,
21097 XEXP (XEXP (x
, 0), 0), 8));
21098 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21099 output_address (mode
, plus_constant (Pmode
,
21100 XEXP (XEXP (x
, 0), 0), 8));
21102 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 8), 0));
21103 if (small_data_operand (x
, GET_MODE (x
)))
21104 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21105 reg_names
[SMALL_DATA_REG
]);
21110 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_PLTSEQ
)
21111 x
= XVECEXP (x
, 0, 1);
21112 /* X is a SYMBOL_REF. Write out the name preceded by a
21113 period and without any trailing data in brackets. Used for function
21114 names. If we are configured for System V (or the embedded ABI) on
21115 the PowerPC, do not emit the period, since those systems do not use
21116 TOCs and the like. */
21117 if (!SYMBOL_REF_P (x
))
21119 output_operand_lossage ("invalid %%z value");
21123 /* For macho, check to see if we need a stub. */
21126 const char *name
= XSTR (x
, 0);
21128 if (darwin_emit_branch_islands
21129 && MACHOPIC_INDIRECT
21130 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
21131 name
= machopic_indirection_name (x
, /*stub_p=*/true);
21133 assemble_name (file
, name
);
21135 else if (!DOT_SYMBOLS
)
21136 assemble_name (file
, XSTR (x
, 0));
21138 rs6000_output_function_entry (file
, XSTR (x
, 0));
21142 /* Like 'L', for last word of TImode/PTImode. */
21144 fputs (reg_names
[REGNO (x
) + 3], file
);
21145 else if (MEM_P (x
))
21147 machine_mode mode
= GET_MODE (x
);
21148 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21149 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21150 output_address (mode
, plus_constant (Pmode
,
21151 XEXP (XEXP (x
, 0), 0), 12));
21152 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21153 output_address (mode
, plus_constant (Pmode
,
21154 XEXP (XEXP (x
, 0), 0), 12));
21156 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 12), 0));
21157 if (small_data_operand (x
, GET_MODE (x
)))
21158 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21159 reg_names
[SMALL_DATA_REG
]);
21163 /* Print AltiVec memory operand. */
21168 gcc_assert (MEM_P (x
));
21172 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x
))
21173 && GET_CODE (tmp
) == AND
21174 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
21175 && INTVAL (XEXP (tmp
, 1)) == -16)
21176 tmp
= XEXP (tmp
, 0);
21177 else if (VECTOR_MEM_VSX_P (GET_MODE (x
))
21178 && GET_CODE (tmp
) == PRE_MODIFY
)
21179 tmp
= XEXP (tmp
, 1);
21181 fprintf (file
, "0,%s", reg_names
[REGNO (tmp
)]);
21184 if (GET_CODE (tmp
) != PLUS
21185 || !REG_P (XEXP (tmp
, 0))
21186 || !REG_P (XEXP (tmp
, 1)))
21188 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21192 if (REGNO (XEXP (tmp
, 0)) == 0)
21193 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 1)) ],
21194 reg_names
[ REGNO (XEXP (tmp
, 0)) ]);
21196 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 0)) ],
21197 reg_names
[ REGNO (XEXP (tmp
, 1)) ]);
21204 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
21205 else if (MEM_P (x
))
21207 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21208 know the width from the mode. */
21209 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
)
21210 fprintf (file
, "%d(%s)", GET_MODE_SIZE (GET_MODE (x
)),
21211 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
21212 else if (GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21213 fprintf (file
, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x
)),
21214 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
21215 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21216 output_address (GET_MODE (x
), XEXP (XEXP (x
, 0), 1));
21218 output_address (GET_MODE (x
), XEXP (x
, 0));
21220 else if (toc_relative_expr_p (x
, false,
21221 &tocrel_base_oac
, &tocrel_offset_oac
))
21222 /* This hack along with a corresponding hack in
21223 rs6000_output_addr_const_extra arranges to output addends
21224 where the assembler expects to find them. eg.
21225 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21226 without this hack would be output as "x@toc+4". We
21228 output_addr_const (file
, CONST_CAST_RTX (tocrel_base_oac
));
21229 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLSGD
)
21230 output_addr_const (file
, XVECEXP (x
, 0, 0));
21231 else if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_PLTSEQ
)
21232 output_addr_const (file
, XVECEXP (x
, 0, 1));
21234 output_addr_const (file
, x
);
21238 if (const char *name
= get_some_local_dynamic_name ())
21239 assemble_name (file
, name
);
21241 output_operand_lossage ("'%%&' used without any "
21242 "local dynamic TLS references");
21246 output_operand_lossage ("invalid %%xn code");
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				&tocrel_offset_oac))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)",
		 reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
    }
  else
    output_addr_const (file, x);
}
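/* Illustrative note (not part of the original sources): with -msdata on
   SysV/EABI targets, SMALL_DATA_RELOC is "sda21" and the small-data base
   pointer lives in r13, so a small-data symbol `var' is printed roughly as

	lwz 3,var@sda21(13)

   while a 64-bit ELF TOC-relative address comes out in the
   "sym+offset@toc@l(reg)" form described in the comment above.  */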
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset_oac) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	    need_toc_init = 1;
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
21357 /* Target hook for assembling integer objects. The PowerPC version has
21358 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21359 is defined. It also needs to handle DI-mode objects on 64-bit
21363 rs6000_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
21365 #ifdef RELOCATABLE_NEEDS_FIXUP
21366 /* Special handling for SI values. */
21367 if (RELOCATABLE_NEEDS_FIXUP
&& size
== 4 && aligned_p
)
21369 static int recurse
= 0;
21371 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21372 the .fixup section. Since the TOC section is already relocated, we
21373 don't need to mark it here. We used to skip the text section, but it
21374 should never be valid for relocated addresses to be placed in the text
21376 if (DEFAULT_ABI
== ABI_V4
21377 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
21378 && in_section
!= toc_section
21380 && !CONST_SCALAR_INT_P (x
)
21386 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCP", fixuplabelno
);
21388 ASM_OUTPUT_LABEL (asm_out_file
, buf
);
21389 fprintf (asm_out_file
, "\t.long\t(");
21390 output_addr_const (asm_out_file
, x
);
21391 fprintf (asm_out_file
, ")@fixup\n");
21392 fprintf (asm_out_file
, "\t.section\t\".fixup\",\"aw\"\n");
21393 ASM_OUTPUT_ALIGN (asm_out_file
, 2);
21394 fprintf (asm_out_file
, "\t.long\t");
21395 assemble_name (asm_out_file
, buf
);
21396 fprintf (asm_out_file
, "\n\t.previous\n");
21400 /* Remove initial .'s to turn a -mcall-aixdesc function
21401 address into the address of the descriptor, not the function
21403 else if (GET_CODE (x
) == SYMBOL_REF
21404 && XSTR (x
, 0)[0] == '.'
21405 && DEFAULT_ABI
== ABI_AIX
)
21407 const char *name
= XSTR (x
, 0);
21408 while (*name
== '.')
21411 fprintf (asm_out_file
, "\t.long\t%s\n", name
);
21415 #endif /* RELOCATABLE_NEEDS_FIXUP */
21416 return default_assemble_integer (x
, size
, aligned_p
);
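/* Illustrative sketch (not part of the original sources): for
   -mrelocatable, an initialized pointer such as "int *p = &x;" is
   emitted roughly as

	.LCP0:
		.long	(x)@fixup
		.section ".fixup","aw"
		.align	2
		.long	.LCP0
		.previous

   so that startup code can find and relocate the stored address; the
   label name and exact directives follow the code above.  */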
21419 /* Return a template string for assembly to emit when making an
21420 external call. FUNOP is the call mem argument operand number. */
21422 static const char *
21423 rs6000_call_template_1 (rtx
*operands
, unsigned int funop
, bool sibcall
)
21425 /* -Wformat-overflow workaround, without which gcc thinks that %u
21426 might produce 10 digits. */
21427 gcc_assert (funop
<= MAX_RECOG_OPERANDS
);
21431 if (TARGET_TLS_MARKERS
&& GET_CODE (operands
[funop
+ 1]) == UNSPEC
)
21433 if (XINT (operands
[funop
+ 1], 1) == UNSPEC_TLSGD
)
21434 sprintf (arg
, "(%%%u@tlsgd)", funop
+ 1);
21435 else if (XINT (operands
[funop
+ 1], 1) == UNSPEC_TLSLD
)
21436 sprintf (arg
, "(%%&@tlsld)");
21438 gcc_unreachable ();
21441 /* The magic 32768 offset here corresponds to the offset of
21442 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21444 sprintf (z
, "%%z%u%s", funop
,
21445 (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
== 2
21448 static char str
[32]; /* 2 spare */
21449 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
21450 sprintf (str
, "b%s %s%s%s", sibcall
? "" : "l", z
, arg
,
21451 sibcall
? "" : "\n\tnop");
21452 else if (DEFAULT_ABI
== ABI_V4
)
21453 sprintf (str
, "b%s %s%s%s", sibcall
? "" : "l", z
, arg
,
21454 flag_pic
? "@plt" : "");
21456 gcc_unreachable ();
21461 rs6000_call_template (rtx
*operands
, unsigned int funop
)
21463 return rs6000_call_template_1 (operands
, funop
, false);
21467 rs6000_sibcall_template (rtx
*operands
, unsigned int funop
)
21469 return rs6000_call_template_1 (operands
, funop
, true);
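/* Illustrative note (not part of the original sources): for ELFv2 a
   non-sibling external call produced by the template above is roughly

	bl foo
	nop

   where the nop is the slot the linker may overwrite with a TOC
   restore, while a SysV -fPIC call is emitted as "bl foo@plt".  */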
21472 /* As above, for indirect calls. */
21474 static const char *
21475 rs6000_indirect_call_template_1 (rtx
*operands
, unsigned int funop
,
21478 /* -Wformat-overflow workaround, without which gcc thinks that %u
21479 might produce 10 digits. */
21480 gcc_assert (funop
<= MAX_RECOG_OPERANDS
);
21482 static char str
[144]; /* 1 spare */
21484 const char *ptrload
= TARGET_64BIT
? "d" : "wz";
21486 if (DEFAULT_ABI
== ABI_AIX
)
21489 ptrload
, funop
+ 2);
21491 /* We don't need the extra code to stop indirect call speculation if
21493 bool speculate
= (TARGET_MACHO
21494 || rs6000_speculate_indirect_jumps
21495 || (REG_P (operands
[funop
])
21496 && REGNO (operands
[funop
]) == LR_REGNO
));
21498 if (!TARGET_MACHO
&& HAVE_AS_PLTSEQ
&& GET_CODE (operands
[funop
]) == UNSPEC
)
21500 const char *rel64
= TARGET_64BIT
? "64" : "";
21503 if (GET_CODE (operands
[funop
+ 1]) == UNSPEC
)
21505 if (XINT (operands
[funop
+ 1], 1) == UNSPEC_TLSGD
)
21506 sprintf (tls
, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21508 else if (XINT (operands
[funop
+ 1], 1) == UNSPEC_TLSLD
)
21509 sprintf (tls
, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21512 gcc_unreachable ();
21515 const char *addend
= (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
21516 && flag_pic
== 2 ? "+32768" : "");
21520 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21521 tls
, rel64
, funop
, addend
);
21522 s
+= sprintf (s
, "crset 2\n\t");
21525 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21526 tls
, rel64
, funop
, addend
);
21528 else if (!speculate
)
21529 s
+= sprintf (s
, "crset 2\n\t");
21531 if (DEFAULT_ABI
== ABI_AIX
)
21537 funop
, ptrload
, funop
+ 3);
21542 funop
, ptrload
, funop
+ 3);
21544 else if (DEFAULT_ABI
== ABI_ELFv2
)
21550 funop
, ptrload
, funop
+ 2);
21555 funop
, ptrload
, funop
+ 2);
21562 funop
, sibcall
? "" : "l");
21566 funop
, sibcall
? "" : "l", sibcall
? "\n\tb $" : "");
21572 rs6000_indirect_call_template (rtx
*operands
, unsigned int funop
)
21574 return rs6000_indirect_call_template_1 (operands
, funop
, false);
21578 rs6000_indirect_sibcall_template (rtx
*operands
, unsigned int funop
)
21580 return rs6000_indirect_call_template_1 (operands
, funop
, true);
21584 /* Output indirect call insns.
21585 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21587 rs6000_pltseq_template (rtx
*operands
, int which
)
21589 const char *rel64
= TARGET_64BIT
? "64" : "";
21592 if (GET_CODE (operands
[3]) == UNSPEC
)
21594 if (XINT (operands
[3], 1) == UNSPEC_TLSGD
)
21595 sprintf (tls
, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21597 else if (XINT (operands
[3], 1) == UNSPEC_TLSLD
)
21598 sprintf (tls
, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21601 gcc_unreachable ();
21604 gcc_assert (DEFAULT_ABI
== ABI_ELFv2
|| DEFAULT_ABI
== ABI_V4
);
21605 static char str
[96]; /* 15 spare */
21606 const char *off
= WORDS_BIG_ENDIAN
? "+2" : "";
21607 const char *addend
= (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
21608 && flag_pic
== 2 ? "+32768" : "");
21613 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21615 tls
, rel64
, TARGET_64BIT
? "d 2,24(1)" : "w 2,12(1)");
21618 if (DEFAULT_ABI
== ABI_V4
&& !flag_pic
)
21620 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21625 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21627 tls
, off
, rel64
, addend
);
21631 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21633 tls
, off
, rel64
, TARGET_64BIT
? "_DS" : "", addend
,
21634 TARGET_64BIT
? "d" : "wz");
21638 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21640 tls
, rel64
, addend
);
21643 gcc_unreachable ();
21649 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21650 /* Emit an assembler directive to set symbol visibility for DECL to
21651 VISIBILITY_TYPE. */
21654 rs6000_assemble_visibility (tree decl
, int vis
)
21659 /* Functions need to have their entry point symbol visibility set as
21660 well as their descriptor symbol visibility. */
21661 if (DEFAULT_ABI
== ABI_AIX
21663 && TREE_CODE (decl
) == FUNCTION_DECL
)
21665 static const char * const visibility_types
[] = {
21666 NULL
, "protected", "hidden", "internal"
21669 const char *name
, *type
;
21671 name
= ((* targetm
.strip_name_encoding
)
21672 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
))));
21673 type
= visibility_types
[vis
];
21675 fprintf (asm_out_file
, "\t.%s\t%s\n", type
, name
);
21676 fprintf (asm_out_file
, "\t.%s\t.%s\n", type
, name
);
21679 default_assemble_visibility (decl
, vis
);
enum rtx_code
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
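/* Example (illustrative): with CCFPmode and NaNs honoured, reversing LT
   must yield UNGE rather than GE, since "a < b" being false does not
   imply "a >= b" when either operand is a NaN; that is why the
   maybe_unordered variant is used above.  */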
21697 /* Generate a compare for CODE. Return a brand-new rtx that
21698 represents the result of the compare. */
21701 rs6000_generate_compare (rtx cmp
, machine_mode mode
)
21703 machine_mode comp_mode
;
21704 rtx compare_result
;
21705 enum rtx_code code
= GET_CODE (cmp
);
21706 rtx op0
= XEXP (cmp
, 0);
21707 rtx op1
= XEXP (cmp
, 1);
21709 if (!TARGET_FLOAT128_HW
&& FLOAT128_VECTOR_P (mode
))
21710 comp_mode
= CCmode
;
21711 else if (FLOAT_MODE_P (mode
))
21712 comp_mode
= CCFPmode
;
21713 else if (code
== GTU
|| code
== LTU
21714 || code
== GEU
|| code
== LEU
)
21715 comp_mode
= CCUNSmode
;
21716 else if ((code
== EQ
|| code
== NE
)
21717 && unsigned_reg_p (op0
)
21718 && (unsigned_reg_p (op1
)
21719 || (CONST_INT_P (op1
) && INTVAL (op1
) != 0)))
21720 /* These are unsigned values, perhaps there will be a later
21721 ordering compare that can be shared with this one. */
21722 comp_mode
= CCUNSmode
;
21724 comp_mode
= CCmode
;
21726 /* If we have an unsigned compare, make sure we don't have a signed value as
21728 if (comp_mode
== CCUNSmode
&& GET_CODE (op1
) == CONST_INT
21729 && INTVAL (op1
) < 0)
21731 op0
= copy_rtx_if_shared (op0
);
21732 op1
= force_reg (GET_MODE (op0
), op1
);
21733 cmp
= gen_rtx_fmt_ee (code
, GET_MODE (cmp
), op0
, op1
);
21736 /* First, the compare. */
21737 compare_result
= gen_reg_rtx (comp_mode
);
21739 /* IEEE 128-bit support in VSX registers when we do not have hardware
21741 if (!TARGET_FLOAT128_HW
&& FLOAT128_VECTOR_P (mode
))
21743 rtx libfunc
= NULL_RTX
;
21744 bool check_nan
= false;
21751 libfunc
= optab_libfunc (eq_optab
, mode
);
21756 libfunc
= optab_libfunc (ge_optab
, mode
);
21761 libfunc
= optab_libfunc (le_optab
, mode
);
21766 libfunc
= optab_libfunc (unord_optab
, mode
);
21767 code
= (code
== UNORDERED
) ? NE
: EQ
;
21773 libfunc
= optab_libfunc (ge_optab
, mode
);
21774 code
= (code
== UNGE
) ? GE
: GT
;
21780 libfunc
= optab_libfunc (le_optab
, mode
);
21781 code
= (code
== UNLE
) ? LE
: LT
;
	  libfunc = optab_libfunc (eq_optab, mode);
	  code = (code == UNEQ) ? EQ : NE;
	  break;
21792 gcc_unreachable ();
21795 gcc_assert (libfunc
);
21798 dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
21799 SImode
, op0
, mode
, op1
, mode
);
21801 /* The library signals an exception for signalling NaNs, so we need to
21802 handle isgreater, etc. by first checking isordered. */
21805 rtx ne_rtx
, normal_dest
, unord_dest
;
21806 rtx unord_func
= optab_libfunc (unord_optab
, mode
);
21807 rtx join_label
= gen_label_rtx ();
21808 rtx join_ref
= gen_rtx_LABEL_REF (VOIDmode
, join_label
);
21809 rtx unord_cmp
= gen_reg_rtx (comp_mode
);
21812 /* Test for either value being a NaN. */
21813 gcc_assert (unord_func
);
21814 unord_dest
= emit_library_call_value (unord_func
, NULL_RTX
, LCT_CONST
,
21815 SImode
, op0
, mode
, op1
, mode
);
21817 /* Set value (0) if either value is a NaN, and jump to the join
21819 dest
= gen_reg_rtx (SImode
);
21820 emit_move_insn (dest
, const1_rtx
);
21821 emit_insn (gen_rtx_SET (unord_cmp
,
21822 gen_rtx_COMPARE (comp_mode
, unord_dest
,
21825 ne_rtx
= gen_rtx_NE (comp_mode
, unord_cmp
, const0_rtx
);
21826 emit_jump_insn (gen_rtx_SET (pc_rtx
,
21827 gen_rtx_IF_THEN_ELSE (VOIDmode
, ne_rtx
,
21831 /* Do the normal comparison, knowing that the values are not
21833 normal_dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
21834 SImode
, op0
, mode
, op1
, mode
);
21836 emit_insn (gen_cstoresi4 (dest
,
21837 gen_rtx_fmt_ee (code
, SImode
, normal_dest
,
21839 normal_dest
, const0_rtx
));
21841 /* Join NaN and non-Nan paths. Compare dest against 0. */
21842 emit_label (join_label
);
21846 emit_insn (gen_rtx_SET (compare_result
,
21847 gen_rtx_COMPARE (comp_mode
, dest
, const0_rtx
)));
21852 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21853 CLOBBERs to match cmptf_internal2 pattern. */
21854 if (comp_mode
== CCFPmode
&& TARGET_XL_COMPAT
21855 && FLOAT128_IBM_P (GET_MODE (op0
))
21856 && TARGET_HARD_FLOAT
)
21857 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
21859 gen_rtx_SET (compare_result
,
21860 gen_rtx_COMPARE (comp_mode
, op0
, op1
)),
21861 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21862 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21863 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21864 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21865 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21866 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21867 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21868 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21869 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (Pmode
)))));
21870 else if (GET_CODE (op1
) == UNSPEC
21871 && XINT (op1
, 1) == UNSPEC_SP_TEST
)
21873 rtx op1b
= XVECEXP (op1
, 0, 0);
21874 comp_mode
= CCEQmode
;
21875 compare_result
= gen_reg_rtx (CCEQmode
);
21877 emit_insn (gen_stack_protect_testdi (compare_result
, op0
, op1b
));
21879 emit_insn (gen_stack_protect_testsi (compare_result
, op0
, op1b
));
21882 emit_insn (gen_rtx_SET (compare_result
,
21883 gen_rtx_COMPARE (comp_mode
, op0
, op1
)));
21886 /* Some kinds of FP comparisons need an OR operation;
21887 under flag_finite_math_only we don't bother. */
21888 if (FLOAT_MODE_P (mode
)
21889 && (!FLOAT128_IEEE_P (mode
) || TARGET_FLOAT128_HW
)
21890 && !flag_finite_math_only
21891 && (code
== LE
|| code
== GE
21892 || code
== UNEQ
|| code
== LTGT
21893 || code
== UNGT
|| code
== UNLT
))
21895 enum rtx_code or1
, or2
;
21896 rtx or1_rtx
, or2_rtx
, compare2_rtx
;
21897 rtx or_result
= gen_reg_rtx (CCEQmode
);
21901 case LE
: or1
= LT
; or2
= EQ
; break;
21902 case GE
: or1
= GT
; or2
= EQ
; break;
21903 case UNEQ
: or1
= UNORDERED
; or2
= EQ
; break;
21904 case LTGT
: or1
= LT
; or2
= GT
; break;
21905 case UNGT
: or1
= UNORDERED
; or2
= GT
; break;
21906 case UNLT
: or1
= UNORDERED
; or2
= LT
; break;
21907 default: gcc_unreachable ();
21909 validate_condition_mode (or1
, comp_mode
);
21910 validate_condition_mode (or2
, comp_mode
);
21911 or1_rtx
= gen_rtx_fmt_ee (or1
, SImode
, compare_result
, const0_rtx
);
21912 or2_rtx
= gen_rtx_fmt_ee (or2
, SImode
, compare_result
, const0_rtx
);
21913 compare2_rtx
= gen_rtx_COMPARE (CCEQmode
,
21914 gen_rtx_IOR (SImode
, or1_rtx
, or2_rtx
),
21916 emit_insn (gen_rtx_SET (or_result
, compare2_rtx
));
21918 compare_result
= or_result
;
21922 validate_condition_mode (code
, GET_MODE (compare_result
));
21924 return gen_rtx_fmt_ee (code
, VOIDmode
, compare_result
, const0_rtx
);
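/* Illustrative sketch (assumption, not from the original sources): the
   software IEEE 128-bit path above behaves roughly like, e.g. for UNGE,

	if (__unordkf2 (a, b) != 0)	   either operand is a NaN
	  result = 1;			   unordered => UNGE is true
	else
	  result = __gekf2 (a, b) >= 0;	   otherwise the ordered test

   where the actual libcall names come from the optab tables and the
   final condition register result is produced by comparing RESULT
   against zero.  */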
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
			  const_tree type1,
			  const_tree type2)
{
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)
    return NULL;

  if (!TARGET_FLOAT128_CVT)
    {
      if ((mode1 == KFmode && mode2 == IFmode)
	  || (mode1 == IFmode && mode2 == KFmode))
	return N_("__float128 and __ibm128 cannot be used in the same "
		  "expression");

      if (TARGET_IEEEQUAD
	  && ((mode1 == IFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == IFmode)))
	return N_("__ibm128 and long double cannot be used in the same "
		  "expression");

      if (!TARGET_IEEEQUAD
	  && ((mode1 == KFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == KFmode)))
	return N_("__float128 and long double cannot be used in the same "
		  "expression");
    }

  return NULL;
}
21975 /* Expand floating point conversion to/from __float128 and __ibm128. */
21978 rs6000_expand_float128_convert (rtx dest
, rtx src
, bool unsigned_p
)
21980 machine_mode dest_mode
= GET_MODE (dest
);
21981 machine_mode src_mode
= GET_MODE (src
);
21982 convert_optab cvt
= unknown_optab
;
21983 bool do_move
= false;
21984 rtx libfunc
= NULL_RTX
;
21986 typedef rtx (*rtx_2func_t
) (rtx
, rtx
);
21987 rtx_2func_t hw_convert
= (rtx_2func_t
)0;
21991 rtx_2func_t from_df
;
21992 rtx_2func_t from_sf
;
21993 rtx_2func_t from_si_sign
;
21994 rtx_2func_t from_si_uns
;
21995 rtx_2func_t from_di_sign
;
21996 rtx_2func_t from_di_uns
;
21999 rtx_2func_t to_si_sign
;
22000 rtx_2func_t to_si_uns
;
22001 rtx_2func_t to_di_sign
;
22002 rtx_2func_t to_di_uns
;
22003 } hw_conversions
[2] = {
22004 /* convertions to/from KFmode */
22006 gen_extenddfkf2_hw
, /* KFmode <- DFmode. */
22007 gen_extendsfkf2_hw
, /* KFmode <- SFmode. */
22008 gen_float_kfsi2_hw
, /* KFmode <- SImode (signed). */
22009 gen_floatuns_kfsi2_hw
, /* KFmode <- SImode (unsigned). */
22010 gen_float_kfdi2_hw
, /* KFmode <- DImode (signed). */
22011 gen_floatuns_kfdi2_hw
, /* KFmode <- DImode (unsigned). */
22012 gen_trunckfdf2_hw
, /* DFmode <- KFmode. */
22013 gen_trunckfsf2_hw
, /* SFmode <- KFmode. */
22014 gen_fix_kfsi2_hw
, /* SImode <- KFmode (signed). */
22015 gen_fixuns_kfsi2_hw
, /* SImode <- KFmode (unsigned). */
22016 gen_fix_kfdi2_hw
, /* DImode <- KFmode (signed). */
22017 gen_fixuns_kfdi2_hw
, /* DImode <- KFmode (unsigned). */
22020 /* convertions to/from TFmode */
22022 gen_extenddftf2_hw
, /* TFmode <- DFmode. */
22023 gen_extendsftf2_hw
, /* TFmode <- SFmode. */
22024 gen_float_tfsi2_hw
, /* TFmode <- SImode (signed). */
22025 gen_floatuns_tfsi2_hw
, /* TFmode <- SImode (unsigned). */
22026 gen_float_tfdi2_hw
, /* TFmode <- DImode (signed). */
22027 gen_floatuns_tfdi2_hw
, /* TFmode <- DImode (unsigned). */
22028 gen_trunctfdf2_hw
, /* DFmode <- TFmode. */
22029 gen_trunctfsf2_hw
, /* SFmode <- TFmode. */
22030 gen_fix_tfsi2_hw
, /* SImode <- TFmode (signed). */
22031 gen_fixuns_tfsi2_hw
, /* SImode <- TFmode (unsigned). */
22032 gen_fix_tfdi2_hw
, /* DImode <- TFmode (signed). */
22033 gen_fixuns_tfdi2_hw
, /* DImode <- TFmode (unsigned). */
22037 if (dest_mode
== src_mode
)
22038 gcc_unreachable ();
22040 /* Eliminate memory operations. */
22042 src
= force_reg (src_mode
, src
);
22046 rtx tmp
= gen_reg_rtx (dest_mode
);
22047 rs6000_expand_float128_convert (tmp
, src
, unsigned_p
);
22048 rs6000_emit_move (dest
, tmp
, dest_mode
);
22052 /* Convert to IEEE 128-bit floating point. */
22053 if (FLOAT128_IEEE_P (dest_mode
))
22055 if (dest_mode
== KFmode
)
22057 else if (dest_mode
== TFmode
)
22060 gcc_unreachable ();
22066 hw_convert
= hw_conversions
[kf_or_tf
].from_df
;
22071 hw_convert
= hw_conversions
[kf_or_tf
].from_sf
;
22077 if (FLOAT128_IBM_P (src_mode
))
22086 cvt
= ufloat_optab
;
22087 hw_convert
= hw_conversions
[kf_or_tf
].from_si_uns
;
22091 cvt
= sfloat_optab
;
22092 hw_convert
= hw_conversions
[kf_or_tf
].from_si_sign
;
22099 cvt
= ufloat_optab
;
22100 hw_convert
= hw_conversions
[kf_or_tf
].from_di_uns
;
22104 cvt
= sfloat_optab
;
22105 hw_convert
= hw_conversions
[kf_or_tf
].from_di_sign
;
22110 gcc_unreachable ();
22114 /* Convert from IEEE 128-bit floating point. */
22115 else if (FLOAT128_IEEE_P (src_mode
))
22117 if (src_mode
== KFmode
)
22119 else if (src_mode
== TFmode
)
22122 gcc_unreachable ();
22128 hw_convert
= hw_conversions
[kf_or_tf
].to_df
;
22133 hw_convert
= hw_conversions
[kf_or_tf
].to_sf
;
22139 if (FLOAT128_IBM_P (dest_mode
))
22149 hw_convert
= hw_conversions
[kf_or_tf
].to_si_uns
;
22154 hw_convert
= hw_conversions
[kf_or_tf
].to_si_sign
;
22162 hw_convert
= hw_conversions
[kf_or_tf
].to_di_uns
;
22167 hw_convert
= hw_conversions
[kf_or_tf
].to_di_sign
;
22172 gcc_unreachable ();
22176 /* Both IBM format. */
22177 else if (FLOAT128_IBM_P (dest_mode
) && FLOAT128_IBM_P (src_mode
))
22181 gcc_unreachable ();
22183 /* Handle conversion between TFmode/KFmode/IFmode. */
22185 emit_insn (gen_rtx_SET (dest
, gen_rtx_FLOAT_EXTEND (dest_mode
, src
)));
22187 /* Handle conversion if we have hardware support. */
22188 else if (TARGET_FLOAT128_HW
&& hw_convert
)
22189 emit_insn ((hw_convert
) (dest
, src
));
22191 /* Call an external function to do the conversion. */
22192 else if (cvt
!= unknown_optab
)
22194 libfunc
= convert_optab_libfunc (cvt
, dest_mode
, src_mode
);
22195 gcc_assert (libfunc
!= NULL_RTX
);
22197 dest2
= emit_library_call_value (libfunc
, dest
, LCT_CONST
, dest_mode
,
22200 gcc_assert (dest2
!= NULL_RTX
);
22201 if (!rtx_equal_p (dest
, dest2
))
22202 emit_move_insn (dest
, dest2
);
22206 gcc_unreachable ();
/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

static rtx
rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
{
  if (op2 == const0_rtx)
    return op1;

  if (GET_CODE (scratch) == SCRATCH)
    scratch = gen_reg_rtx (mode);

  if (logical_operand (op2, mode))
    emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
  else
    emit_insn (gen_rtx_SET (scratch,
			    gen_rtx_PLUS (mode, op1,
					  negate_rtx (mode, op2))));

  return scratch;
}
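/* Example (illustrative): comparing r3 against 0x1234 (a logical_operand)
   can use "xori 9,3,0x1234" and test r9 for zero, whereas a constant such
   as 0x12345 does not fit the xori/xoris forms, so the expander instead
   emits r9 = r3 + (-0x12345) and tests that sum for zero.  */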
22234 rs6000_emit_sCOND (machine_mode mode
, rtx operands
[])
22237 machine_mode op_mode
;
22238 enum rtx_code cond_code
;
22239 rtx result
= operands
[0];
22241 condition_rtx
= rs6000_generate_compare (operands
[1], mode
);
22242 cond_code
= GET_CODE (condition_rtx
);
22244 if (cond_code
== NE
22245 || cond_code
== GE
|| cond_code
== LE
22246 || cond_code
== GEU
|| cond_code
== LEU
22247 || cond_code
== ORDERED
|| cond_code
== UNGE
|| cond_code
== UNLE
)
22249 rtx not_result
= gen_reg_rtx (CCEQmode
);
22250 rtx not_op
, rev_cond_rtx
;
22251 machine_mode cc_mode
;
22253 cc_mode
= GET_MODE (XEXP (condition_rtx
, 0));
22255 rev_cond_rtx
= gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode
, cond_code
),
22256 SImode
, XEXP (condition_rtx
, 0), const0_rtx
);
22257 not_op
= gen_rtx_COMPARE (CCEQmode
, rev_cond_rtx
, const0_rtx
);
22258 emit_insn (gen_rtx_SET (not_result
, not_op
));
22259 condition_rtx
= gen_rtx_EQ (VOIDmode
, not_result
, const0_rtx
);
22262 op_mode
= GET_MODE (XEXP (operands
[1], 0));
22263 if (op_mode
== VOIDmode
)
22264 op_mode
= GET_MODE (XEXP (operands
[1], 1));
22266 if (TARGET_POWERPC64
&& (op_mode
== DImode
|| FLOAT_MODE_P (mode
)))
22268 PUT_MODE (condition_rtx
, DImode
);
22269 convert_move (result
, condition_rtx
, 0);
22273 PUT_MODE (condition_rtx
, SImode
);
22274 emit_insn (gen_rtx_SET (result
, condition_rtx
));
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
22292 /* Return the string to output a conditional branch to LABEL, which is
22293 the operand template of the label, or NULL if the branch is really a
22294 conditional return.
22296 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22297 condition code register and its mode specifies what kind of
22298 comparison we made.
22300 REVERSED is nonzero if we should reverse the sense of the comparison.
22302 INSN is the insn. */
22305 output_cbranch (rtx op
, const char *label
, int reversed
, rtx_insn
*insn
)
22307 static char string
[64];
22308 enum rtx_code code
= GET_CODE (op
);
22309 rtx cc_reg
= XEXP (op
, 0);
22310 machine_mode mode
= GET_MODE (cc_reg
);
22311 int cc_regno
= REGNO (cc_reg
) - CR0_REGNO
;
22312 int need_longbranch
= label
!= NULL
&& get_attr_length (insn
) == 8;
22313 int really_reversed
= reversed
^ need_longbranch
;
22319 validate_condition_mode (code
, mode
);
22321 /* Work out which way this really branches. We could use
22322 reverse_condition_maybe_unordered here always but this
22323 makes the resulting assembler clearer. */
22324 if (really_reversed
)
22326 /* Reversal of FP compares takes care -- an ordered compare
22327 becomes an unordered compare and vice versa. */
22328 if (mode
== CCFPmode
)
22329 code
= reverse_condition_maybe_unordered (code
);
22331 code
= reverse_condition (code
);
22336 /* Not all of these are actually distinct opcodes, but
22337 we distinguish them for clarity of the resulting assembler. */
22338 case NE
: case LTGT
:
22339 ccode
= "ne"; break;
22340 case EQ
: case UNEQ
:
22341 ccode
= "eq"; break;
22343 ccode
= "ge"; break;
22344 case GT
: case GTU
: case UNGT
:
22345 ccode
= "gt"; break;
22347 ccode
= "le"; break;
22348 case LT
: case LTU
: case UNLT
:
22349 ccode
= "lt"; break;
22350 case UNORDERED
: ccode
= "un"; break;
22351 case ORDERED
: ccode
= "nu"; break;
22352 case UNGE
: ccode
= "nl"; break;
22353 case UNLE
: ccode
= "ng"; break;
22355 gcc_unreachable ();
22358 /* Maybe we have a guess as to how likely the branch is. */
22360 note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
);
22361 if (note
!= NULL_RTX
)
22363 /* PROB is the difference from 50%. */
22364 int prob
= profile_probability::from_reg_br_prob_note (XINT (note
, 0))
22365 .to_reg_br_prob_base () - REG_BR_PROB_BASE
/ 2;
22367 /* Only hint for highly probable/improbable branches on newer cpus when
22368 we have real profile data, as static prediction overrides processor
22369 dynamic prediction. For older cpus we may as well always hint, but
22370 assume not taken for branches that are very close to 50% as a
22371 mispredicted taken branch is more expensive than a
22372 mispredicted not-taken branch. */
22373 if (rs6000_always_hint
22374 || (abs (prob
) > REG_BR_PROB_BASE
/ 100 * 48
22375 && (profile_status_for_fn (cfun
) != PROFILE_GUESSED
)
22376 && br_prob_note_reliable_p (note
)))
22378 if (abs (prob
) > REG_BR_PROB_BASE
/ 20
22379 && ((prob
> 0) ^ need_longbranch
))
22387 s
+= sprintf (s
, "b%slr%s ", ccode
, pred
);
22389 s
+= sprintf (s
, "b%s%s ", ccode
, pred
);
22391 /* We need to escape any '%' characters in the reg_names string.
22392 Assume they'd only be the first character.... */
22393 if (reg_names
[cc_regno
+ CR0_REGNO
][0] == '%')
22395 s
+= sprintf (s
, "%s", reg_names
[cc_regno
+ CR0_REGNO
]);
22399 /* If the branch distance was too far, we may have to use an
22400 unconditional branch to go the distance. */
22401 if (need_longbranch
)
22402 s
+= sprintf (s
, ",$+8\n\tb %s", label
);
22404 s
+= sprintf (s
, ",%s", label
);
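/* Illustrative note (not part of the original sources): a short reversed
   branch on cr0 from the code above looks like "bne 0,.L5" (with an
   optional "+"/"-" prediction suffix), while a target that is out of
   range is reached via

	beq 0,$+8
	b .L5

   i.e. the inverted condition hops over an unconditional long branch.  */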
22410 /* Return insn for VSX or Altivec comparisons. */
22413 rs6000_emit_vector_compare_inner (enum rtx_code code
, rtx op0
, rtx op1
)
22416 machine_mode mode
= GET_MODE (op0
);
22424 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
22435 mask
= gen_reg_rtx (mode
);
22436 emit_insn (gen_rtx_SET (mask
, gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
22443 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22444 DMODE is expected destination mode. This is a recursive function. */
22447 rs6000_emit_vector_compare (enum rtx_code rcode
,
22449 machine_mode dmode
)
22452 bool swap_operands
= false;
22453 bool try_again
= false;
22455 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode
));
22456 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
22458 /* See if the comparison works as is. */
22459 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22467 swap_operands
= true;
22472 swap_operands
= true;
22480 /* Invert condition and try again.
22481 e.g., A != B becomes ~(A==B). */
22483 enum rtx_code rev_code
;
22484 enum insn_code nor_code
;
22487 rev_code
= reverse_condition_maybe_unordered (rcode
);
22488 if (rev_code
== UNKNOWN
)
22491 nor_code
= optab_handler (one_cmpl_optab
, dmode
);
22492 if (nor_code
== CODE_FOR_nothing
)
22495 mask2
= rs6000_emit_vector_compare (rev_code
, op0
, op1
, dmode
);
22499 mask
= gen_reg_rtx (dmode
);
22500 emit_insn (GEN_FCN (nor_code
) (mask
, mask2
));
22508 /* Try GT/GTU/LT/LTU OR EQ */
22511 enum insn_code ior_code
;
22512 enum rtx_code new_code
;
22533 gcc_unreachable ();
22536 ior_code
= optab_handler (ior_optab
, dmode
);
22537 if (ior_code
== CODE_FOR_nothing
)
22540 c_rtx
= rs6000_emit_vector_compare (new_code
, op0
, op1
, dmode
);
22544 eq_rtx
= rs6000_emit_vector_compare (EQ
, op0
, op1
, dmode
);
22548 mask
= gen_reg_rtx (dmode
);
22549 emit_insn (GEN_FCN (ior_code
) (mask
, c_rtx
, eq_rtx
));
22560 std::swap (op0
, op1
);
22562 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22567 /* You only get two chances. */
22571 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22572 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22573 operands for the relation operation COND. */
22576 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
22577 rtx cond
, rtx cc_op0
, rtx cc_op1
)
22579 machine_mode dest_mode
= GET_MODE (dest
);
22580 machine_mode mask_mode
= GET_MODE (cc_op0
);
22581 enum rtx_code rcode
= GET_CODE (cond
);
22582 machine_mode cc_mode
= CCmode
;
22585 bool invert_move
= false;
22587 if (VECTOR_UNIT_NONE_P (dest_mode
))
22590 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
22591 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
22595 /* Swap operands if we can, and fall back to doing the operation as
22596 specified, and doing a NOR to invert the test. */
22602 /* Invert condition and try again.
22603 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22604 invert_move
= true;
22605 rcode
= reverse_condition_maybe_unordered (rcode
);
22606 if (rcode
== UNKNOWN
)
22612 if (GET_MODE_CLASS (mask_mode
) == MODE_VECTOR_INT
)
22614 /* Invert condition to avoid compound test. */
22615 invert_move
= true;
22616 rcode
= reverse_condition (rcode
);
22624 /* Mark unsigned tests with CCUNSmode. */
22625 cc_mode
= CCUNSmode
;
22627 /* Invert condition to avoid compound test if necessary. */
22628 if (rcode
== GEU
|| rcode
== LEU
)
22630 invert_move
= true;
22631 rcode
= reverse_condition (rcode
);
22639 /* Get the vector mask for the given relational operations. */
22640 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
22646 std::swap (op_true
, op_false
);
22648 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22649 if (GET_MODE_CLASS (dest_mode
) == MODE_VECTOR_INT
22650 && (GET_CODE (op_true
) == CONST_VECTOR
22651 || GET_CODE (op_false
) == CONST_VECTOR
))
22653 rtx constant_0
= CONST0_RTX (dest_mode
);
22654 rtx constant_m1
= CONSTM1_RTX (dest_mode
);
22656 if (op_true
== constant_m1
&& op_false
== constant_0
)
22658 emit_move_insn (dest
, mask
);
22662 else if (op_true
== constant_0
&& op_false
== constant_m1
)
22664 emit_insn (gen_rtx_SET (dest
, gen_rtx_NOT (dest_mode
, mask
)));
22668 /* If we can't use the vector comparison directly, perhaps we can use
22669 the mask for the true or false fields, instead of loading up a
22671 if (op_true
== constant_m1
)
22674 if (op_false
== constant_0
)
22678 if (!REG_P (op_true
) && !SUBREG_P (op_true
))
22679 op_true
= force_reg (dest_mode
, op_true
);
22681 if (!REG_P (op_false
) && !SUBREG_P (op_false
))
22682 op_false
= force_reg (dest_mode
, op_false
);
22684 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
22685 CONST0_RTX (dest_mode
));
22686 emit_insn (gen_rtx_SET (dest
,
22687 gen_rtx_IF_THEN_ELSE (dest_mode
,
22694 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22695 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22696 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22697 hardware has no such operation. */
22700 rs6000_emit_p9_fp_minmax (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22702 enum rtx_code code
= GET_CODE (op
);
22703 rtx op0
= XEXP (op
, 0);
22704 rtx op1
= XEXP (op
, 1);
22705 machine_mode compare_mode
= GET_MODE (op0
);
22706 machine_mode result_mode
= GET_MODE (dest
);
22707 bool max_p
= false;
22709 if (result_mode
!= compare_mode
)
22712 if (code
== GE
|| code
== GT
)
22714 else if (code
== LE
|| code
== LT
)
22719 if (rtx_equal_p (op0
, true_cond
) && rtx_equal_p (op1
, false_cond
))
22722 else if (rtx_equal_p (op1
, true_cond
) && rtx_equal_p (op0
, false_cond
))
22728 rs6000_emit_minmax (dest
, max_p
? SMAX
: SMIN
, op0
, op1
);
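/* Example (illustrative): on power9, "(a >= b) ? a : b" on doubles
   matches this subcase and can be emitted as a single

	xsmaxcdp 1,1,2

   (C-style maximum), avoiding the compare-plus-conditional-move sequence
   that the generic path below would produce.  */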
22732 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22733 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22734 operands of the last comparison is nonzero/true, FALSE_COND if it is
22735 zero/false. Return 0 if the hardware has no such operation. */
22738 rs6000_emit_p9_fp_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22740 enum rtx_code code
= GET_CODE (op
);
22741 rtx op0
= XEXP (op
, 0);
22742 rtx op1
= XEXP (op
, 1);
22743 machine_mode result_mode
= GET_MODE (dest
);
22748 if (!can_create_pseudo_p ())
22761 code
= swap_condition (code
);
22762 std::swap (op0
, op1
);
22769 /* Generate: [(parallel [(set (dest)
22770 (if_then_else (op (cmp1) (cmp2))
22773 (clobber (scratch))])]. */
22775 compare_rtx
= gen_rtx_fmt_ee (code
, CCFPmode
, op0
, op1
);
22776 cmove_rtx
= gen_rtx_SET (dest
,
22777 gen_rtx_IF_THEN_ELSE (result_mode
,
22782 clobber_rtx
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (V2DImode
));
22783 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
22784 gen_rtvec (2, cmove_rtx
, clobber_rtx
)));
22789 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22790 operands of the last comparison is nonzero/true, FALSE_COND if it
22791 is zero/false. Return 0 if the hardware has no such operation. */
22794 rs6000_emit_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22796 enum rtx_code code
= GET_CODE (op
);
22797 rtx op0
= XEXP (op
, 0);
22798 rtx op1
= XEXP (op
, 1);
22799 machine_mode compare_mode
= GET_MODE (op0
);
22800 machine_mode result_mode
= GET_MODE (dest
);
22802 bool is_against_zero
;
22804 /* These modes should always match. */
22805 if (GET_MODE (op1
) != compare_mode
22806 /* In the isel case however, we can use a compare immediate, so
22807 op1 may be a small constant. */
22808 && (!TARGET_ISEL
|| !short_cint_operand (op1
, VOIDmode
)))
22810 if (GET_MODE (true_cond
) != result_mode
)
22812 if (GET_MODE (false_cond
) != result_mode
)
22815 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22816 if (TARGET_P9_MINMAX
22817 && (compare_mode
== SFmode
|| compare_mode
== DFmode
)
22818 && (result_mode
== SFmode
|| result_mode
== DFmode
))
22820 if (rs6000_emit_p9_fp_minmax (dest
, op
, true_cond
, false_cond
))
22823 if (rs6000_emit_p9_fp_cmove (dest
, op
, true_cond
, false_cond
))
22827 /* Don't allow using floating point comparisons for integer results for
22829 if (FLOAT_MODE_P (compare_mode
) && !FLOAT_MODE_P (result_mode
))
22832 /* First, work out if the hardware can do this at all, or
22833 if it's too slow.... */
22834 if (!FLOAT_MODE_P (compare_mode
))
22837 return rs6000_emit_int_cmove (dest
, op
, true_cond
, false_cond
);
22841 is_against_zero
= op1
== CONST0_RTX (compare_mode
);
22843 /* A floating-point subtract might overflow, underflow, or produce
22844 an inexact result, thus changing the floating-point flags, so it
22845 can't be generated if we care about that. It's safe if one side
22846 of the construct is zero, since then no subtract will be
22848 if (SCALAR_FLOAT_MODE_P (compare_mode
)
22849 && flag_trapping_math
&& ! is_against_zero
)
22852 /* Eliminate half of the comparisons by switching operands, this
22853 makes the remaining code simpler. */
22854 if (code
== UNLT
|| code
== UNGT
|| code
== UNORDERED
|| code
== NE
22855 || code
== LTGT
|| code
== LT
|| code
== UNLE
)
22857 code
= reverse_condition_maybe_unordered (code
);
22859 true_cond
= false_cond
;
22863 /* UNEQ and LTGT take four instructions for a comparison with zero,
22864 it'll probably be faster to use a branch here too. */
22865 if (code
== UNEQ
&& HONOR_NANS (compare_mode
))
22868 /* We're going to try to implement comparisons by performing
22869 a subtract, then comparing against zero. Unfortunately,
22870 Inf - Inf is NaN which is not zero, and so if we don't
22871 know that the operand is finite and the comparison
22872 would treat EQ different to UNORDERED, we can't do it. */
22873 if (HONOR_INFINITIES (compare_mode
)
22874 && code
!= GT
&& code
!= UNGE
22875 && (GET_CODE (op1
) != CONST_DOUBLE
22876 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1
)))
22877 /* Constructs of the form (a OP b ? a : b) are safe. */
22878 && ((! rtx_equal_p (op0
, false_cond
) && ! rtx_equal_p (op1
, false_cond
))
22879 || (! rtx_equal_p (op0
, true_cond
)
22880 && ! rtx_equal_p (op1
, true_cond
))))
22883 /* At this point we know we can use fsel. */
22885 /* Reduce the comparison to a comparison against zero. */
22886 if (! is_against_zero
)
22888 temp
= gen_reg_rtx (compare_mode
);
22889 emit_insn (gen_rtx_SET (temp
, gen_rtx_MINUS (compare_mode
, op0
, op1
)));
22891 op1
= CONST0_RTX (compare_mode
);
22894 /* If we don't care about NaNs we can reduce some of the comparisons
22895 down to faster ones. */
22896 if (! HONOR_NANS (compare_mode
))
22902 true_cond
= false_cond
;
22915 /* Now, reduce everything down to a GE. */
22922 temp
= gen_reg_rtx (compare_mode
);
22923 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
22928 temp
= gen_reg_rtx (compare_mode
);
22929 emit_insn (gen_rtx_SET (temp
, gen_rtx_ABS (compare_mode
, op0
)));
22934 temp
= gen_reg_rtx (compare_mode
);
22935 emit_insn (gen_rtx_SET (temp
,
22936 gen_rtx_NEG (compare_mode
,
22937 gen_rtx_ABS (compare_mode
, op0
))));
22942 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22943 temp
= gen_reg_rtx (result_mode
);
22944 emit_insn (gen_rtx_SET (temp
,
22945 gen_rtx_IF_THEN_ELSE (result_mode
,
22946 gen_rtx_GE (VOIDmode
,
22948 true_cond
, false_cond
)));
22949 false_cond
= true_cond
;
22952 temp
= gen_reg_rtx (compare_mode
);
22953 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
22958 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22959 temp
= gen_reg_rtx (result_mode
);
22960 emit_insn (gen_rtx_SET (temp
,
22961 gen_rtx_IF_THEN_ELSE (result_mode
,
22962 gen_rtx_GE (VOIDmode
,
22964 true_cond
, false_cond
)));
22965 true_cond
= false_cond
;
22968 temp
= gen_reg_rtx (compare_mode
);
22969 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
22974 gcc_unreachable ();
22977 emit_insn (gen_rtx_SET (dest
,
22978 gen_rtx_IF_THEN_ELSE (result_mode
,
22979 gen_rtx_GE (VOIDmode
,
22981 true_cond
, false_cond
)));
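/* Example (illustrative): once the test has been reduced to
   "op0 >= 0.0", a scalar conditional move such as

	d = (a >= 0.0) ? b : c;

   maps onto a single "fsel 1,2,3,4" (dest, condition, value-if-true,
   value-if-false), which is why all the cases above are rewritten as a
   GE comparison against zero first.  */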
22985 /* Same as above, but for ints (isel). */
22988 rs6000_emit_int_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22990 rtx condition_rtx
, cr
;
22991 machine_mode mode
= GET_MODE (dest
);
22992 enum rtx_code cond_code
;
22993 rtx (*isel_func
) (rtx
, rtx
, rtx
, rtx
, rtx
);
22996 if (mode
!= SImode
&& (!TARGET_POWERPC64
|| mode
!= DImode
))
22999 /* We still have to do the compare, because isel doesn't do a
23000 compare, it just looks at the CRx bits set by a previous compare
23002 condition_rtx
= rs6000_generate_compare (op
, mode
);
23003 cond_code
= GET_CODE (condition_rtx
);
23004 cr
= XEXP (condition_rtx
, 0);
23005 signedp
= GET_MODE (cr
) == CCmode
;
23007 isel_func
= (mode
== SImode
23008 ? (signedp
? gen_isel_signed_si
: gen_isel_unsigned_si
)
23009 : (signedp
? gen_isel_signed_di
: gen_isel_unsigned_di
));
23013 case LT
: case GT
: case LTU
: case GTU
: case EQ
:
23014 /* isel handles these directly. */
23018 /* We need to swap the sense of the comparison. */
23020 std::swap (false_cond
, true_cond
);
23021 PUT_CODE (condition_rtx
, reverse_condition (cond_code
));
23026 false_cond
= force_reg (mode
, false_cond
);
23027 if (true_cond
!= const0_rtx
)
23028 true_cond
= force_reg (mode
, true_cond
);
23030 emit_insn (isel_func (dest
, condition_rtx
, true_cond
, false_cond
, cr
));
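/* Example (illustrative): on isel-capable cores the integer conditional
   move

	r = (a < b) ? x : y;

   becomes a compare followed by "isel 3,6,7,0" with no branch, where
   the final operand names the CR bit to test (bit 0 = LT of cr0); the
   register numbers here are arbitrary.  */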
23036 rs6000_emit_minmax (rtx dest
, enum rtx_code code
, rtx op0
, rtx op1
)
23038 machine_mode mode
= GET_MODE (op0
);
23042 /* VSX/altivec have direct min/max insns. */
23043 if ((code
== SMAX
|| code
== SMIN
)
23044 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode
)
23045 || (mode
== SFmode
&& VECTOR_UNIT_VSX_P (DFmode
))))
23047 emit_insn (gen_rtx_SET (dest
, gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
23051 if (code
== SMAX
|| code
== SMIN
)
23056 if (code
== SMAX
|| code
== UMAX
)
23057 target
= emit_conditional_move (dest
, c
, op0
, op1
, mode
,
23058 op0
, op1
, mode
, 0);
23060 target
= emit_conditional_move (dest
, c
, op0
, op1
, mode
,
23061 op1
, op0
, mode
, 0);
23062 gcc_assert (target
);
23063 if (target
!= dest
)
23064 emit_move_insn (dest
, target
);
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_load_lockedqi;
      break;
    case E_HImode:
      fn = gen_load_lockedhi;
      break;
    case E_SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case E_DImode:
      fn = gen_load_lockeddi;
      break;
    case E_TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_store_conditionalqi;
      break;
    case E_HImode:
      fn = gen_store_conditionalhi;
      break;
    case E_SImode:
      fn = gen_store_conditionalsi;
      break;
    case E_DImode:
      fn = gen_store_conditionaldi;
      break;
    case E_TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
23203 /* A subroutine of the various atomic expanders. For sub-word operations,
23204 we must adjust things to operate on SImode. Given the original MEM,
23205 return a new aligned memory. Also build and return the quantities by
23206 which to shift and mask. */
23209 rs6000_adjust_atomic_subword (rtx orig_mem
, rtx
*pshift
, rtx
*pmask
)
23211 rtx addr
, align
, shift
, mask
, mem
;
23212 HOST_WIDE_INT shift_mask
;
23213 machine_mode mode
= GET_MODE (orig_mem
);
23215 /* For smaller modes, we have to implement this via SImode. */
23216 shift_mask
= (mode
== QImode
? 0x18 : 0x10);
23218 addr
= XEXP (orig_mem
, 0);
23219 addr
= force_reg (GET_MODE (addr
), addr
);
23221 /* Aligned memory containing subword. Generate a new memory. We
23222 do not want any of the existing MEM_ATTR data, as we're now
23223 accessing memory outside the original object. */
23224 align
= expand_simple_binop (Pmode
, AND
, addr
, GEN_INT (-4),
23225 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23226 mem
= gen_rtx_MEM (SImode
, align
);
23227 MEM_VOLATILE_P (mem
) = MEM_VOLATILE_P (orig_mem
);
23228 if (MEM_ALIAS_SET (orig_mem
) == ALIAS_SET_MEMORY_BARRIER
)
23229 set_mem_alias_set (mem
, ALIAS_SET_MEMORY_BARRIER
);
23231 /* Shift amount for subword relative to aligned word. */
23232 shift
= gen_reg_rtx (SImode
);
23233 addr
= gen_lowpart (SImode
, addr
);
23234 rtx tmp
= gen_reg_rtx (SImode
);
23235 emit_insn (gen_ashlsi3 (tmp
, addr
, GEN_INT (3)));
23236 emit_insn (gen_andsi3 (shift
, tmp
, GEN_INT (shift_mask
)));
23237 if (BYTES_BIG_ENDIAN
)
23238 shift
= expand_simple_binop (SImode
, XOR
, shift
, GEN_INT (shift_mask
),
23239 shift
, 1, OPTAB_LIB_WIDEN
);
23242 /* Mask for insertion. */
23243 mask
= expand_simple_binop (SImode
, ASHIFT
, GEN_INT (GET_MODE_MASK (mode
)),
23244 shift
, NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
					  oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}

/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
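/* Illustrative sketch (assumption, not from the original sources): for
   an atomic operation on a char at address P, the subword helpers above
   behave roughly like

	word  = P & -4;			   aligned SImode container
	shift = (P & 3) * 8;		   bit position of the byte,
					   XORed with 24 on big-endian
	mask  = 0xff << shift;		   insertion mask

   and the new value is merged as (old & ~mask) | (new << shift).  */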
23279 /* Expand an atomic compare and swap operation. */
23282 rs6000_expand_atomic_compare_and_swap (rtx operands
[])
23284 rtx boolval
, retval
, mem
, oldval
, newval
, cond
;
23285 rtx label1
, label2
, x
, mask
, shift
;
23286 machine_mode mode
, orig_mode
;
23287 enum memmodel mod_s
, mod_f
;
23290 boolval
= operands
[0];
23291 retval
= operands
[1];
23293 oldval
= operands
[3];
23294 newval
= operands
[4];
23295 is_weak
= (INTVAL (operands
[5]) != 0);
23296 mod_s
= memmodel_base (INTVAL (operands
[6]));
23297 mod_f
= memmodel_base (INTVAL (operands
[7]));
23298 orig_mode
= mode
= GET_MODE (mem
);
23300 mask
= shift
= NULL_RTX
;
23301 if (mode
== QImode
|| mode
== HImode
)
23303 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23304 lwarx and shift/mask operations. With power8, we need to do the
23305 comparison in SImode, but the store is still done in QI/HImode. */
23306 oldval
= convert_modes (SImode
, mode
, oldval
, 1);
23308 if (!TARGET_SYNC_HI_QI
)
23310 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23312 /* Shift and mask OLDVAL into position with the word. */
23313 oldval
= expand_simple_binop (SImode
, ASHIFT
, oldval
, shift
,
23314 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23316 /* Shift and mask NEWVAL into position within the word. */
23317 newval
= convert_modes (SImode
, mode
, newval
, 1);
23318 newval
= expand_simple_binop (SImode
, ASHIFT
, newval
, shift
,
23319 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23322 /* Prepare to adjust the return value. */
23323 retval
= gen_reg_rtx (SImode
);
23326 else if (reg_overlap_mentioned_p (retval
, oldval
))
23327 oldval
= copy_to_reg (oldval
);
23329 if (mode
!= TImode
&& !reg_or_short_operand (oldval
, mode
))
23330 oldval
= copy_to_mode_reg (mode
, oldval
);
23332 if (reg_overlap_mentioned_p (retval
, newval
))
23333 newval
= copy_to_reg (newval
);
23335 mem
= rs6000_pre_atomic_barrier (mem
, mod_s
);
23340 label1
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23341 emit_label (XEXP (label1
, 0));
23343 label2
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23345 emit_load_locked (mode
, retval
, mem
);
23349 x
= expand_simple_binop (SImode
, AND
, retval
, mask
,
23350 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23352 cond
= gen_reg_rtx (CCmode
);
23353 /* If we have TImode, synthesize a comparison. */
23354 if (mode
!= TImode
)
23355 x
= gen_rtx_COMPARE (CCmode
, x
, oldval
);
23358 rtx xor1_result
= gen_reg_rtx (DImode
);
23359 rtx xor2_result
= gen_reg_rtx (DImode
);
23360 rtx or_result
= gen_reg_rtx (DImode
);
23361 rtx new_word0
= simplify_gen_subreg (DImode
, x
, TImode
, 0);
23362 rtx new_word1
= simplify_gen_subreg (DImode
, x
, TImode
, 8);
23363 rtx old_word0
= simplify_gen_subreg (DImode
, oldval
, TImode
, 0);
23364 rtx old_word1
= simplify_gen_subreg (DImode
, oldval
, TImode
, 8);
23366 emit_insn (gen_xordi3 (xor1_result
, new_word0
, old_word0
));
23367 emit_insn (gen_xordi3 (xor2_result
, new_word1
, old_word1
));
23368 emit_insn (gen_iordi3 (or_result
, xor1_result
, xor2_result
));
23369 x
= gen_rtx_COMPARE (CCmode
, or_result
, const0_rtx
);
23372 emit_insn (gen_rtx_SET (cond
, x
));
23374 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23375 emit_unlikely_jump (x
, label2
);
23379 x
= rs6000_mask_atomic_subword (retval
, newval
, mask
);
23381 emit_store_conditional (orig_mode
, cond
, mem
, x
);
23385 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23386 emit_unlikely_jump (x
, label1
);
23389 if (!is_mm_relaxed (mod_f
))
23390 emit_label (XEXP (label2
, 0));
23392 rs6000_post_atomic_barrier (mod_s
);
23394 if (is_mm_relaxed (mod_f
))
23395 emit_label (XEXP (label2
, 0));
23398 rs6000_finish_atomic_subword (operands
[1], retval
, shift
);
23399 else if (mode
!= GET_MODE (operands
[1]))
23400 convert_move (operands
[1], retval
, 1);
23402 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23403 x
= gen_rtx_EQ (SImode
, cond
, const0_rtx
);
23404 emit_insn (gen_rtx_SET (boolval
, x
));
23407 /* Expand an atomic exchange operation. */
23410 rs6000_expand_atomic_exchange (rtx operands
[])
23412 rtx retval
, mem
, val
, cond
;
23414 enum memmodel model
;
23415 rtx label
, x
, mask
, shift
;
23417 retval
= operands
[0];
23420 model
= memmodel_base (INTVAL (operands
[3]));
23421 mode
= GET_MODE (mem
);
23423 mask
= shift
= NULL_RTX
;
23424 if (!TARGET_SYNC_HI_QI
&& (mode
== QImode
|| mode
== HImode
))
23426 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23428 /* Shift and mask VAL into position with the word. */
23429 val
= convert_modes (SImode
, mode
, val
, 1);
23430 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23431 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23433 /* Prepare to adjust the return value. */
23434 retval
= gen_reg_rtx (SImode
);
23438 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23440 label
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23441 emit_label (XEXP (label
, 0));
23443 emit_load_locked (mode
, retval
, mem
);
23447 x
= rs6000_mask_atomic_subword (retval
, val
, mask
);
23449 cond
= gen_reg_rtx (CCmode
);
23450 emit_store_conditional (mode
, cond
, mem
, x
);
23452 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23453 emit_unlikely_jump (x
, label
);
23455 rs6000_post_atomic_barrier (model
);
23458 rs6000_finish_atomic_subword (operands
[0], retval
, shift
);
23461 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23462 to perform. MEM is the memory on which to operate. VAL is the second
23463 operand of the binary operator. BEFORE and AFTER are optional locations to
23464 return the value of MEM either before of after the operation. MODEL_RTX
23465 is a CONST_INT containing the memory model to use. */
23468 rs6000_expand_atomic_op (enum rtx_code code
, rtx mem
, rtx val
,
23469 rtx orig_before
, rtx orig_after
, rtx model_rtx
)
23471 enum memmodel model
= memmodel_base (INTVAL (model_rtx
));
23472 machine_mode mode
= GET_MODE (mem
);
23473 machine_mode store_mode
= mode
;
23474 rtx label
, x
, cond
, mask
, shift
;
23475 rtx before
= orig_before
, after
= orig_after
;
23477 mask
= shift
= NULL_RTX
;
23478 /* On power8, we want to use SImode for the operation. On previous systems,
23479 use the operation in a subword and shift/mask to get the proper byte or
23481 if (mode
== QImode
|| mode
== HImode
)
23483 if (TARGET_SYNC_HI_QI
)
23485 val
= convert_modes (SImode
, mode
, val
, 1);
23487 /* Prepare to adjust the return value. */
23488 before
= gen_reg_rtx (SImode
);
23490 after
= gen_reg_rtx (SImode
);
23495 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23497 /* Shift and mask VAL into position with the word. */
23498 val
= convert_modes (SImode
, mode
, val
, 1);
23499 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23500 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23506 /* We've already zero-extended VAL. That is sufficient to
23507 make certain that it does not affect other bits. */
23512 /* If we make certain that all of the other bits in VAL are
23513 set, that will be sufficient to not affect other bits. */
23514 x
= gen_rtx_NOT (SImode
, mask
);
23515 x
= gen_rtx_IOR (SImode
, x
, val
);
23516 emit_insn (gen_rtx_SET (val
, x
));
23523 /* These will all affect bits outside the field and need
23524 adjustment via MASK within the loop. */
23528 gcc_unreachable ();
23531 /* Prepare to adjust the return value. */
23532 before
= gen_reg_rtx (SImode
);
23534 after
= gen_reg_rtx (SImode
);
23535 store_mode
= mode
= SImode
;
23539 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23541 label
= gen_label_rtx ();
23542 emit_label (label
);
23543 label
= gen_rtx_LABEL_REF (VOIDmode
, label
);
23545 if (before
== NULL_RTX
)
23546 before
= gen_reg_rtx (mode
);
23548 emit_load_locked (mode
, before
, mem
);
23552 x
= expand_simple_binop (mode
, AND
, before
, val
,
23553 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23554 after
= expand_simple_unop (mode
, NOT
, x
, after
, 1);
23558 after
= expand_simple_binop (mode
, code
, before
, val
,
23559 after
, 1, OPTAB_LIB_WIDEN
);
23565 x
= expand_simple_binop (SImode
, AND
, after
, mask
,
23566 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23567 x
= rs6000_mask_atomic_subword (before
, x
, mask
);
23569 else if (store_mode
!= mode
)
23570 x
= convert_modes (store_mode
, mode
, x
, 1);
23572 cond
= gen_reg_rtx (CCmode
);
23573 emit_store_conditional (store_mode
, cond
, mem
, x
);
23575 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23576 emit_unlikely_jump (x
, label
);
23578 rs6000_post_atomic_barrier (model
);
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
	 then do the calculations in a SImode register.  */
23585 rs6000_finish_atomic_subword (orig_before
, before
, shift
);
23587 rs6000_finish_atomic_subword (orig_after
, after
, shift
);
23589 else if (store_mode
!= mode
)
      /* QImode/HImode on machines with lbarx/lharx where we do the native
	 operation and then do the calculations in a SImode register.  */
23594 convert_move (orig_before
, before
, 1);
23596 convert_move (orig_after
, after
, 1);
23598 else if (orig_after
&& after
!= orig_after
)
23599 emit_move_insn (orig_after
, after
);
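/* As an illustration (not part of the original code), a word-sized
   __atomic_fetch_add with a seq-cst memory model ends up as roughly:

	sync				# rs6000_pre_atomic_barrier
   loop:
	lwarx   rBEFORE,0,rMEM
	add     rAFTER,rBEFORE,rVAL
	stwcx.  rAFTER,0,rMEM
	bne-    loop
	isync				# rs6000_post_atomic_barrier

   ORIG_BEFORE and ORIG_AFTER, when supplied, receive rBEFORE and rAFTER
   respectively.  Register names here are placeholders.  */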
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */
void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs (reg, mode);
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	(TARGET_HARD_FLOAT ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
23643 if (FP_REGNO_P (reg
) && DECIMAL_FLOAT_MODE_P (mode
) && !BYTES_BIG_ENDIAN
)
23648 for (i
= 0; i
< nregs
; i
++)
23650 if (REG_P (src
) && FP_REGNO_P (REGNO (src
)))
23651 p_src
= gen_rtx_REG (reg_mode
, REGNO (src
) + nregs
- 1 - i
);
23653 p_src
= simplify_gen_subreg (reg_mode
, src
, mode
,
23654 i
* reg_mode_size
);
23656 if (REG_P (dst
) && FP_REGNO_P (REGNO (dst
)))
23657 p_dst
= gen_rtx_REG (reg_mode
, REGNO (dst
) + nregs
- 1 - i
);
23659 p_dst
= simplify_gen_subreg (reg_mode
, dst
, mode
,
23660 i
* reg_mode_size
);
23662 emit_insn (gen_rtx_SET (p_dst
, p_src
));
23668 if (REG_P (src
) && REG_P (dst
) && (REGNO (src
) < REGNO (dst
)))
      /* Move register range backwards, if we might have destructive
	 overlap.  */
23673 for (i
= nregs
- 1; i
>= 0; i
--)
23674 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode
, dst
, mode
,
23675 i
* reg_mode_size
),
23676 simplify_gen_subreg (reg_mode
, src
, mode
,
23677 i
* reg_mode_size
)));
23683 bool used_update
= false;
23684 rtx restore_basereg
= NULL_RTX
;
23686 if (MEM_P (src
) && INT_REGNO_P (reg
))
23690 if (GET_CODE (XEXP (src
, 0)) == PRE_INC
23691 || GET_CODE (XEXP (src
, 0)) == PRE_DEC
)
23694 breg
= XEXP (XEXP (src
, 0), 0);
23695 delta_rtx
= (GET_CODE (XEXP (src
, 0)) == PRE_INC
23696 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src
)))
23697 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src
))));
23698 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23699 src
= replace_equiv_address (src
, breg
);
23701 else if (! rs6000_offsettable_memref_p (src
, reg_mode
, true))
23703 if (GET_CODE (XEXP (src
, 0)) == PRE_MODIFY
)
23705 rtx basereg
= XEXP (XEXP (src
, 0), 0);
23708 rtx ndst
= simplify_gen_subreg (reg_mode
, dst
, mode
, 0);
23709 emit_insn (gen_rtx_SET (ndst
,
23710 gen_rtx_MEM (reg_mode
,
23712 used_update
= true;
23715 emit_insn (gen_rtx_SET (basereg
,
23716 XEXP (XEXP (src
, 0), 1)));
23717 src
= replace_equiv_address (src
, basereg
);
23721 rtx basereg
= gen_rtx_REG (Pmode
, reg
);
23722 emit_insn (gen_rtx_SET (basereg
, XEXP (src
, 0)));
23723 src
= replace_equiv_address (src
, basereg
);
23727 breg
= XEXP (src
, 0);
23728 if (GET_CODE (breg
) == PLUS
|| GET_CODE (breg
) == LO_SUM
)
23729 breg
= XEXP (breg
, 0);
23731 /* If the base register we are using to address memory is
23732 also a destination reg, then change that register last. */
23734 && REGNO (breg
) >= REGNO (dst
)
23735 && REGNO (breg
) < REGNO (dst
) + nregs
)
23736 j
= REGNO (breg
) - REGNO (dst
);
23738 else if (MEM_P (dst
) && INT_REGNO_P (reg
))
23742 if (GET_CODE (XEXP (dst
, 0)) == PRE_INC
23743 || GET_CODE (XEXP (dst
, 0)) == PRE_DEC
)
23746 breg
= XEXP (XEXP (dst
, 0), 0);
23747 delta_rtx
= (GET_CODE (XEXP (dst
, 0)) == PRE_INC
23748 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst
)))
23749 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst
))));
23751 /* We have to update the breg before doing the store.
23752 Use store with update, if available. */
23756 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
23757 emit_insn (TARGET_32BIT
23758 ? (TARGET_POWERPC64
23759 ? gen_movdi_si_update (breg
, breg
, delta_rtx
, nsrc
)
23760 : gen_movsi_update (breg
, breg
, delta_rtx
, nsrc
))
23761 : gen_movdi_di_update (breg
, breg
, delta_rtx
, nsrc
));
23762 used_update
= true;
23765 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23766 dst
= replace_equiv_address (dst
, breg
);
23768 else if (!rs6000_offsettable_memref_p (dst
, reg_mode
, true)
23769 && GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
23771 if (GET_CODE (XEXP (dst
, 0)) == PRE_MODIFY
)
23773 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
23776 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
23777 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode
,
23780 used_update
= true;
23783 emit_insn (gen_rtx_SET (basereg
,
23784 XEXP (XEXP (dst
, 0), 1)));
23785 dst
= replace_equiv_address (dst
, basereg
);
23789 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
23790 rtx offsetreg
= XEXP (XEXP (dst
, 0), 1);
23791 gcc_assert (GET_CODE (XEXP (dst
, 0)) == PLUS
23793 && REG_P (offsetreg
)
23794 && REGNO (basereg
) != REGNO (offsetreg
));
23795 if (REGNO (basereg
) == 0)
23797 rtx tmp
= offsetreg
;
23798 offsetreg
= basereg
;
23801 emit_insn (gen_add3_insn (basereg
, basereg
, offsetreg
));
23802 restore_basereg
= gen_sub3_insn (basereg
, basereg
, offsetreg
);
23803 dst
= replace_equiv_address (dst
, basereg
);
23806 else if (GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
23807 gcc_assert (rs6000_offsettable_memref_p (dst
, reg_mode
, true));
23810 for (i
= 0; i
< nregs
; i
++)
23812 /* Calculate index to next subword. */
23817 /* If compiler already emitted move of first word by
23818 store with update, no need to do anything. */
23819 if (j
== 0 && used_update
)
23822 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode
, dst
, mode
,
23823 j
* reg_mode_size
),
23824 simplify_gen_subreg (reg_mode
, src
, mode
,
23825 j
* reg_mode_size
)));
23827 if (restore_basereg
!= NULL_RTX
)
23828 emit_insn (restore_basereg
);
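/* Illustrative note (not from the original source): on a 64-bit target a
   TImode value held in GPRs is split by the code above into two DImode
   moves, and the loop runs backwards whenever the source and destination
   register ranges overlap destructively (REGNO (src) < REGNO (dst)).  */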
23833 /* This page contains routines that are used to determine what the
23834 function prologue and epilogue code will do and write them out. */
23836 /* Determine whether the REG is really used. */
23839 save_reg_p (int reg
)
23841 /* We need to mark the PIC offset register live for the same conditions
23842 as it is set up, or otherwise it won't be saved before we clobber it. */
23844 if (reg
== RS6000_PIC_OFFSET_TABLE_REGNUM
&& !TARGET_SINGLE_PIC_BASE
)
      /* When calling eh_return, we must return true for all the cases
	 where conditional_register_usage marks the PIC offset reg
	 call used or fixed.  */
23849 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
23850 && (crtl
->calls_eh_return
23851 || df_regs_ever_live_p (reg
)
23852 || !constant_pool_empty_p ()))
23855 if ((DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
)
23860 return !call_used_regs
[reg
] && df_regs_ever_live_p (reg
);
23863 /* Return the first fixed-point register that is required to be
23864 saved. 32 if none. */
23867 first_reg_to_save (void)
23871 /* Find lowest numbered live register. */
23872 for (first_reg
= 13; first_reg
<= 31; first_reg
++)
23873 if (save_reg_p (first_reg
))
23878 && crtl
->uses_pic_offset_table
23879 && first_reg
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
23880 return RS6000_PIC_OFFSET_TABLE_REGNUM
;
23886 /* Similar, for FP regs. */
23889 first_fp_reg_to_save (void)
23893 /* Find lowest numbered live register. */
23894 for (first_reg
= 14 + 32; first_reg
<= 63; first_reg
++)
23895 if (save_reg_p (first_reg
))
23901 /* Similar, for AltiVec regs. */
23904 first_altivec_reg_to_save (void)
23908 /* Stack frame remains as is unless we are in AltiVec ABI. */
23909 if (! TARGET_ALTIVEC_ABI
)
23910 return LAST_ALTIVEC_REGNO
+ 1;
23912 /* On Darwin, the unwind routines are compiled without
23913 TARGET_ALTIVEC, and use save_world to save/restore the
23914 altivec registers when necessary. */
23915 if (DEFAULT_ABI
== ABI_DARWIN
&& crtl
->calls_eh_return
23916 && ! TARGET_ALTIVEC
)
23917 return FIRST_ALTIVEC_REGNO
+ 20;
23919 /* Find lowest numbered live register. */
23920 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23921 if (save_reg_p (i
))
23927 /* Return a 32-bit mask of the AltiVec registers we need to set in
23928 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
23929 the 32-bit word is 0. */
23931 static unsigned int
23932 compute_vrsave_mask (void)
23934 unsigned int i
, mask
= 0;
23936 /* On Darwin, the unwind routines are compiled without
23937 TARGET_ALTIVEC, and use save_world to save/restore the
23938 call-saved altivec registers when necessary. */
23939 if (DEFAULT_ABI
== ABI_DARWIN
&& crtl
->calls_eh_return
23940 && ! TARGET_ALTIVEC
)
23943 /* First, find out if we use _any_ altivec registers. */
23944 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23945 if (df_regs_ever_live_p (i
))
23946 mask
|= ALTIVEC_REG_BIT (i
);
23951 /* Next, remove the argument registers from the set. These must
23952 be in the VRSAVE mask set by the caller, so we don't need to add
23953 them in again. More importantly, the mask we compute here is
23954 used to generate CLOBBERs in the set_vrsave insn, and we do not
23955 wish the argument registers to die. */
23956 for (i
= ALTIVEC_ARG_MIN_REG
; i
< (unsigned) crtl
->args
.info
.vregno
; i
++)
23957 mask
&= ~ALTIVEC_REG_BIT (i
);
23959 /* Similarly, remove the return value from the set. */
23962 diddle_return_value (is_altivec_return_reg
, &yes
);
23964 mask
&= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN
);
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */
23975 compute_save_world_info (rs6000_stack_t
*info
)
23977 info
->world_save_p
= 1;
23979 = (WORLD_SAVE_P (info
)
23980 && DEFAULT_ABI
== ABI_DARWIN
23981 && !cfun
->has_nonlocal_label
23982 && info
->first_fp_reg_save
== FIRST_SAVED_FP_REGNO
23983 && info
->first_gp_reg_save
== FIRST_SAVED_GP_REGNO
23984 && info
->first_altivec_reg_save
== FIRST_SAVED_ALTIVEC_REGNO
23985 && info
->cr_save_p
);
23987 /* This will not work in conjunction with sibcalls. Make sure there
23988 are none. (This check is expensive, but seldom executed.) */
23989 if (WORLD_SAVE_P (info
))
23992 for (insn
= get_last_insn_anywhere (); insn
; insn
= PREV_INSN (insn
))
23993 if (CALL_P (insn
) && SIBLING_CALL_P (insn
))
23995 info
->world_save_p
= 0;
24000 if (WORLD_SAVE_P (info
))
24002 /* Even if we're not touching VRsave, make sure there's room on the
24003 stack for it, if it looks like we're calling SAVE_WORLD, which
24004 will attempt to save it. */
24005 info
->vrsave_size
= 4;
24007 /* If we are going to save the world, we need to save the link register too. */
24008 info
->lr_save_p
= 1;
24010 /* "Save" the VRsave register too if we're saving the world. */
24011 if (info
->vrsave_mask
== 0)
24012 info
->vrsave_mask
= compute_vrsave_mask ();
      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
24017 gcc_assert (info
->first_fp_reg_save
>= FIRST_SAVED_FP_REGNO
24018 && (info
->first_altivec_reg_save
24019 >= FIRST_SAVED_ALTIVEC_REGNO
));
24027 is_altivec_return_reg (rtx reg
, void *xyes
)
24029 bool *yes
= (bool *) xyes
;
24030 if (REGNO (reg
) == ALTIVEC_ARG_RETURN
)
/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */
24042 fixed_reg_p (int reg
)
24044 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24045 backend sets it, overriding anything the user might have given. */
24046 if (reg
== RS6000_PIC_OFFSET_TABLE_REGNUM
24047 && ((DEFAULT_ABI
== ABI_V4
&& flag_pic
)
24048 || (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
24049 || (TARGET_TOC
&& TARGET_MINIMAL_TOC
)))
24052 return fixed_regs
[reg
];
/* Determine the strategy for saving/restoring registers.  */
24058 SAVE_MULTIPLE
= 0x1,
24059 SAVE_INLINE_GPRS
= 0x2,
24060 SAVE_INLINE_FPRS
= 0x4,
24061 SAVE_NOINLINE_GPRS_SAVES_LR
= 0x8,
24062 SAVE_NOINLINE_FPRS_SAVES_LR
= 0x10,
24063 SAVE_INLINE_VRS
= 0x20,
24064 REST_MULTIPLE
= 0x100,
24065 REST_INLINE_GPRS
= 0x200,
24066 REST_INLINE_FPRS
= 0x400,
24067 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
= 0x800,
24068 REST_INLINE_VRS
= 0x1000
24072 rs6000_savres_strategy (rs6000_stack_t
*info
,
24073 bool using_static_chain_p
)
24077 /* Select between in-line and out-of-line save and restore of regs.
24078 First, all the obvious cases where we don't use out-of-line. */
24079 if (crtl
->calls_eh_return
24080 || cfun
->machine
->ra_need_lr
)
24081 strategy
|= (SAVE_INLINE_FPRS
| REST_INLINE_FPRS
24082 | SAVE_INLINE_GPRS
| REST_INLINE_GPRS
24083 | SAVE_INLINE_VRS
| REST_INLINE_VRS
);
24085 if (info
->first_gp_reg_save
== 32)
24086 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24088 if (info
->first_fp_reg_save
== 64)
24089 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24091 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
+ 1)
24092 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24094 /* Define cutoff for using out-of-line functions to save registers. */
24095 if (DEFAULT_ABI
== ABI_V4
|| TARGET_ELF
)
24097 if (!optimize_size
)
24099 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24100 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24101 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24105 /* Prefer out-of-line restore if it will exit. */
24106 if (info
->first_fp_reg_save
> 61)
24107 strategy
|= SAVE_INLINE_FPRS
;
24108 if (info
->first_gp_reg_save
> 29)
24110 if (info
->first_fp_reg_save
== 64)
24111 strategy
|= SAVE_INLINE_GPRS
;
24113 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24115 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
)
24116 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24119 else if (DEFAULT_ABI
== ABI_DARWIN
)
24121 if (info
->first_fp_reg_save
> 60)
24122 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24123 if (info
->first_gp_reg_save
> 29)
24124 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24125 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24129 gcc_checking_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
24130 if ((flag_shrink_wrap_separate
&& optimize_function_for_speed_p (cfun
))
24131 || info
->first_fp_reg_save
> 61)
24132 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24133 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24134 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t. the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
24141 if (using_static_chain_p
24142 && (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
24143 strategy
|= ((DEFAULT_ABI
== ABI_DARWIN
? 0 : SAVE_INLINE_FPRS
)
24145 | SAVE_INLINE_VRS
);
24147 /* Don't ever restore fixed regs. That means we can't use the
24148 out-of-line register restore functions if a fixed reg is in the
24149 range of regs restored. */
24150 if (!(strategy
& REST_INLINE_FPRS
))
24151 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24154 strategy
|= REST_INLINE_FPRS
;
24158 /* We can only use the out-of-line routines to restore fprs if we've
24159 saved all the registers from first_fp_reg_save in the prologue.
24160 Otherwise, we risk loading garbage. Of course, if we have saved
24161 out-of-line then we know we haven't skipped any fprs. */
24162 if ((strategy
& SAVE_INLINE_FPRS
)
24163 && !(strategy
& REST_INLINE_FPRS
))
24164 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24165 if (!save_reg_p (i
))
24167 strategy
|= REST_INLINE_FPRS
;
24171 /* Similarly, for altivec regs. */
24172 if (!(strategy
& REST_INLINE_VRS
))
24173 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24176 strategy
|= REST_INLINE_VRS
;
24180 if ((strategy
& SAVE_INLINE_VRS
)
24181 && !(strategy
& REST_INLINE_VRS
))
24182 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24183 if (!save_reg_p (i
))
24185 strategy
|= REST_INLINE_VRS
;
24189 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24190 saved is an out-of-line save or restore. Set up the value for
24191 the next test (excluding out-of-line gprs). */
24192 bool lr_save_p
= (info
->lr_save_p
24193 || !(strategy
& SAVE_INLINE_FPRS
)
24194 || !(strategy
& SAVE_INLINE_VRS
)
24195 || !(strategy
& REST_INLINE_FPRS
)
24196 || !(strategy
& REST_INLINE_VRS
));
24198 if (TARGET_MULTIPLE
24199 && !TARGET_POWERPC64
24200 && info
->first_gp_reg_save
< 31
24201 && !(flag_shrink_wrap
24202 && flag_shrink_wrap_separate
24203 && optimize_function_for_speed_p (cfun
)))
24206 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24207 if (save_reg_p (i
))
	/* Don't use store multiple if only one reg needs to be
	   saved.  This can occur for example when the ABI_V4 pic reg
	   (r30) needs to be saved to make calls, but r31 is not
	   used.  */
24215 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24218 /* Prefer store multiple for saves over out-of-line
24219 routines, since the store-multiple instruction will
24220 always be smaller. */
24221 strategy
|= SAVE_INLINE_GPRS
| SAVE_MULTIPLE
;
	  /* The situation is more complicated with load multiple.
	     We'd prefer to use the out-of-line routines for restores,
	     since the "exit" out-of-line routines can handle the
	     restore of LR and the frame teardown.  However it doesn't
	     make sense to use the out-of-line routine if that is the
	     only reason we'd need to save LR, and we can't use the
	     "exit" out-of-line gpr restore if we have saved some
	     fprs; in those cases it is advantageous to use load
	     multiple when available.  */
24232 if (info
->first_fp_reg_save
!= 64 || !lr_save_p
)
24233 strategy
|= REST_INLINE_GPRS
| REST_MULTIPLE
;
24237 /* Using the "exit" out-of-line routine does not improve code size
24238 if using it would require lr to be saved and if only saving one
24240 else if (!lr_save_p
&& info
->first_gp_reg_save
> 29)
24241 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24243 /* Don't ever restore fixed regs. */
24244 if ((strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24245 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24246 if (fixed_reg_p (i
))
24248 strategy
|= REST_INLINE_GPRS
;
24249 strategy
&= ~REST_MULTIPLE
;
24253 /* We can only use load multiple or the out-of-line routines to
24254 restore gprs if we've saved all the registers from
24255 first_gp_reg_save. Otherwise, we risk loading garbage.
24256 Of course, if we have saved out-of-line or used stmw then we know
24257 we haven't skipped any gprs. */
24258 if ((strategy
& (SAVE_INLINE_GPRS
| SAVE_MULTIPLE
)) == SAVE_INLINE_GPRS
24259 && (strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24260 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24261 if (!save_reg_p (i
))
24263 strategy
|= REST_INLINE_GPRS
;
24264 strategy
&= ~REST_MULTIPLE
;
24268 if (TARGET_ELF
&& TARGET_64BIT
)
24270 if (!(strategy
& SAVE_INLINE_FPRS
))
24271 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24272 else if (!(strategy
& SAVE_INLINE_GPRS
)
24273 && info
->first_fp_reg_save
== 64)
24274 strategy
|= SAVE_NOINLINE_GPRS_SAVES_LR
;
24276 else if (TARGET_AIX
&& !(strategy
& REST_INLINE_FPRS
))
24277 strategy
|= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
;
24279 if (TARGET_MACHO
&& !(strategy
& SAVE_INLINE_FPRS
))
24280 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24285 /* Calculate the stack information for the current function. This is
24286 complicated by having two separate calling sequences, the AIX calling
24287 sequence and the V.4 calling sequence.
24289 AIX (and Darwin/Mac OS X) stack frames look like:
24291 SP----> +---------------------------------------+
24292 | back chain to caller | 0 0
24293 +---------------------------------------+
24294 | saved CR | 4 8 (8-11)
24295 +---------------------------------------+
24297 +---------------------------------------+
24298 | reserved for compilers | 12 24
24299 +---------------------------------------+
24300 | reserved for binders | 16 32
24301 +---------------------------------------+
24302 | saved TOC pointer | 20 40
24303 +---------------------------------------+
24304 | Parameter save area (+padding*) (P) | 24 48
24305 +---------------------------------------+
24306 | Alloca space (A) | 24+P etc.
24307 +---------------------------------------+
24308 | Local variable space (L) | 24+P+A
24309 +---------------------------------------+
24310 | Float/int conversion temporary (X) | 24+P+A+L
24311 +---------------------------------------+
24312 | Save area for AltiVec registers (W) | 24+P+A+L+X
24313 +---------------------------------------+
24314 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24315 +---------------------------------------+
24316 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24317 +---------------------------------------+
	| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
	+---------------------------------------+
	| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
24321 +---------------------------------------+
24322 old SP->| back chain to caller's caller |
24323 +---------------------------------------+
24325 * If the alloca area is present, the parameter save area is
24326 padded so that the former starts 16-byte aligned.
   The required alignment for AIX configurations is two words (i.e., 8
   bytes).
24331 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24333 SP----> +---------------------------------------+
24334 | Back chain to caller | 0
24335 +---------------------------------------+
24336 | Save area for CR | 8
24337 +---------------------------------------+
24339 +---------------------------------------+
24340 | Saved TOC pointer | 24
24341 +---------------------------------------+
24342 | Parameter save area (+padding*) (P) | 32
24343 +---------------------------------------+
24344 | Alloca space (A) | 32+P
24345 +---------------------------------------+
24346 | Local variable space (L) | 32+P+A
24347 +---------------------------------------+
24348 | Save area for AltiVec registers (W) | 32+P+A+L
24349 +---------------------------------------+
24350 | AltiVec alignment padding (Y) | 32+P+A+L+W
24351 +---------------------------------------+
24352 | Save area for GP registers (G) | 32+P+A+L+W+Y
24353 +---------------------------------------+
24354 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24355 +---------------------------------------+
24356 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24357 +---------------------------------------+
24359 * If the alloca area is present, the parameter save area is
24360 padded so that the former starts 16-byte aligned.
24362 V.4 stack frames look like:
24364 SP----> +---------------------------------------+
24365 | back chain to caller | 0
24366 +---------------------------------------+
24367 | caller's saved LR | 4
24368 +---------------------------------------+
24369 | Parameter save area (+padding*) (P) | 8
24370 +---------------------------------------+
24371 | Alloca space (A) | 8+P
24372 +---------------------------------------+
24373 | Varargs save area (V) | 8+P+A
24374 +---------------------------------------+
24375 | Local variable space (L) | 8+P+A+V
24376 +---------------------------------------+
24377 | Float/int conversion temporary (X) | 8+P+A+V+L
24378 +---------------------------------------+
24379 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24380 +---------------------------------------+
24381 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24382 +---------------------------------------+
24383 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24384 +---------------------------------------+
24385 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24386 +---------------------------------------+
24387 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24388 +---------------------------------------+
24389 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24390 +---------------------------------------+
24391 old SP->| back chain to caller's caller |
24392 +---------------------------------------+
24394 * If the alloca area is present and the required alignment is
24395 16 bytes, the parameter save area is padded so that the
24396 alloca area starts 16-byte aligned.
   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyway.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)
24405 The EABI configuration defaults to the V.4 layout. However,
24406 the stack alignment requirements may differ. If -mno-eabi is not
24407 given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */
24411 #ifndef ABI_STACK_BOUNDARY
24412 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
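/* Rough illustrative example (not from the original source): for an ELFv2
   64-bit function that saves only r30-r31, uses 40 bytes of locals and has
   no outgoing argument area, the computation below gives roughly
	gp_size    = 8 * (32 - 30)			= 16
	vars_size  = RS6000_ALIGN (40, 8)		= 40
	fixed_size = RS6000_SAVE_AREA			= 32
	save_size  = RS6000_ALIGN (16, 16)		= 16
	total_size = RS6000_ALIGN (40 + 16 + 32, 16)	= 96 bytes.
   The exact numbers depend on the ABI, the register set actually saved and
   the alignment parameters above.  */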
24415 static rs6000_stack_t
*
24416 rs6000_stack_info (void)
24418 /* We should never be called for thunks, we are not set up for that. */
24419 gcc_assert (!cfun
->is_thunk
);
24421 rs6000_stack_t
*info
= &stack_info
;
24422 int reg_size
= TARGET_32BIT
? 4 : 8;
24427 HOST_WIDE_INT non_fixed_size
;
24428 bool using_static_chain_p
;
24430 if (reload_completed
&& info
->reload_completed
)
24433 memset (info
, 0, sizeof (*info
));
24434 info
->reload_completed
= reload_completed
;
24436 /* Select which calling sequence. */
24437 info
->abi
= DEFAULT_ABI
;
24439 /* Calculate which registers need to be saved & save area size. */
24440 info
->first_gp_reg_save
= first_reg_to_save ();
24441 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24442 even if it currently looks like we won't. Reload may need it to
24443 get at a constant; if so, it will have already created a constant
24444 pool entry for it. */
24445 if (((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
24446 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
24447 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
24448 && crtl
->uses_const_pool
24449 && info
->first_gp_reg_save
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
24450 first_gp
= RS6000_PIC_OFFSET_TABLE_REGNUM
;
24452 first_gp
= info
->first_gp_reg_save
;
24454 info
->gp_size
= reg_size
* (32 - first_gp
);
24456 info
->first_fp_reg_save
= first_fp_reg_to_save ();
24457 info
->fp_size
= 8 * (64 - info
->first_fp_reg_save
);
24459 info
->first_altivec_reg_save
= first_altivec_reg_to_save ();
24460 info
->altivec_size
= 16 * (LAST_ALTIVEC_REGNO
+ 1
24461 - info
->first_altivec_reg_save
);
24463 /* Does this function call anything? */
24464 info
->calls_p
= (!crtl
->is_leaf
|| cfun
->machine
->ra_needs_full_frame
);
24466 /* Determine if we need to save the condition code registers. */
24467 if (save_reg_p (CR2_REGNO
)
24468 || save_reg_p (CR3_REGNO
)
24469 || save_reg_p (CR4_REGNO
))
24471 info
->cr_save_p
= 1;
24472 if (DEFAULT_ABI
== ABI_V4
)
24473 info
->cr_size
= reg_size
;
24476 /* If the current function calls __builtin_eh_return, then we need
24477 to allocate stack space for registers that will hold data for
24478 the exception handler. */
24479 if (crtl
->calls_eh_return
)
24482 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; ++i
)
24485 ehrd_size
= i
* UNITS_PER_WORD
;
24490 /* In the ELFv2 ABI, we also need to allocate space for separate
24491 CR field save areas if the function calls __builtin_eh_return. */
24492 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
24494 /* This hard-codes that we have three call-saved CR fields. */
24495 ehcr_size
= 3 * reg_size
;
24496 /* We do *not* use the regular CR save mechanism. */
24497 info
->cr_save_p
= 0;
24502 /* Determine various sizes. */
24503 info
->reg_size
= reg_size
;
24504 info
->fixed_size
= RS6000_SAVE_AREA
;
24505 info
->vars_size
= RS6000_ALIGN (get_frame_size (), 8);
24506 if (cfun
->calls_alloca
)
24508 RS6000_ALIGN (crtl
->outgoing_args_size
+ info
->fixed_size
,
24509 STACK_BOUNDARY
/ BITS_PER_UNIT
) - info
->fixed_size
;
24511 info
->parm_size
= RS6000_ALIGN (crtl
->outgoing_args_size
,
24512 TARGET_ALTIVEC
? 16 : 8);
24513 if (FRAME_GROWS_DOWNWARD
)
24515 += RS6000_ALIGN (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
,
24516 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
)
24517 - (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
);
24519 if (TARGET_ALTIVEC_ABI
)
24520 info
->vrsave_mask
= compute_vrsave_mask ();
24522 if (TARGET_ALTIVEC_VRSAVE
&& info
->vrsave_mask
)
24523 info
->vrsave_size
= 4;
24525 compute_save_world_info (info
);
24527 /* Calculate the offsets. */
24528 switch (DEFAULT_ABI
)
24532 gcc_unreachable ();
24537 info
->fp_save_offset
= -info
->fp_size
;
24538 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24540 if (TARGET_ALTIVEC_ABI
)
24542 info
->vrsave_save_offset
= info
->gp_save_offset
- info
->vrsave_size
;
24544 /* Align stack so vector save area is on a quadword boundary.
24545 The padding goes above the vectors. */
24546 if (info
->altivec_size
!= 0)
24547 info
->altivec_padding_size
= info
->vrsave_save_offset
& 0xF;
24549 info
->altivec_save_offset
= info
->vrsave_save_offset
24550 - info
->altivec_padding_size
24551 - info
->altivec_size
;
24552 gcc_assert (info
->altivec_size
== 0
24553 || info
->altivec_save_offset
% 16 == 0);
24555 /* Adjust for AltiVec case. */
24556 info
->ehrd_offset
= info
->altivec_save_offset
- ehrd_size
;
24559 info
->ehrd_offset
= info
->gp_save_offset
- ehrd_size
;
24561 info
->ehcr_offset
= info
->ehrd_offset
- ehcr_size
;
24562 info
->cr_save_offset
= reg_size
; /* first word when 64-bit. */
24563 info
->lr_save_offset
= 2*reg_size
;
24567 info
->fp_save_offset
= -info
->fp_size
;
24568 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24569 info
->cr_save_offset
= info
->gp_save_offset
- info
->cr_size
;
24571 if (TARGET_ALTIVEC_ABI
)
24573 info
->vrsave_save_offset
= info
->cr_save_offset
- info
->vrsave_size
;
24575 /* Align stack so vector save area is on a quadword boundary. */
24576 if (info
->altivec_size
!= 0)
24577 info
->altivec_padding_size
= 16 - (-info
->vrsave_save_offset
% 16);
24579 info
->altivec_save_offset
= info
->vrsave_save_offset
24580 - info
->altivec_padding_size
24581 - info
->altivec_size
;
24583 /* Adjust for AltiVec case. */
24584 info
->ehrd_offset
= info
->altivec_save_offset
;
24587 info
->ehrd_offset
= info
->cr_save_offset
;
24589 info
->ehrd_offset
-= ehrd_size
;
24590 info
->lr_save_offset
= reg_size
;
24593 save_align
= (TARGET_ALTIVEC_ABI
|| DEFAULT_ABI
== ABI_DARWIN
) ? 16 : 8;
24594 info
->save_size
= RS6000_ALIGN (info
->fp_size
24596 + info
->altivec_size
24597 + info
->altivec_padding_size
24601 + info
->vrsave_size
,
24604 non_fixed_size
= info
->vars_size
+ info
->parm_size
+ info
->save_size
;
24606 info
->total_size
= RS6000_ALIGN (non_fixed_size
+ info
->fixed_size
,
24607 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
);
24609 /* Determine if we need to save the link register. */
24611 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
24613 && !TARGET_PROFILE_KERNEL
)
24614 || (DEFAULT_ABI
== ABI_V4
&& cfun
->calls_alloca
)
24615 #ifdef TARGET_RELOCATABLE
24616 || (DEFAULT_ABI
== ABI_V4
24617 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
24618 && !constant_pool_empty_p ())
24620 || rs6000_ra_ever_killed ())
24621 info
->lr_save_p
= 1;
24623 using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
24624 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
24625 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
24626 info
->savres_strategy
= rs6000_savres_strategy (info
, using_static_chain_p
);
24628 if (!(info
->savres_strategy
& SAVE_INLINE_GPRS
)
24629 || !(info
->savres_strategy
& SAVE_INLINE_FPRS
)
24630 || !(info
->savres_strategy
& SAVE_INLINE_VRS
)
24631 || !(info
->savres_strategy
& REST_INLINE_GPRS
)
24632 || !(info
->savres_strategy
& REST_INLINE_FPRS
)
24633 || !(info
->savres_strategy
& REST_INLINE_VRS
))
24634 info
->lr_save_p
= 1;
24636 if (info
->lr_save_p
)
24637 df_set_regs_ever_live (LR_REGNO
, true);
  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables is more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */
24654 else if (DEFAULT_ABI
== ABI_V4
)
24655 info
->push_p
= non_fixed_size
!= 0;
24657 else if (frame_pointer_needed
)
24660 else if (TARGET_XCOFF
&& write_symbols
!= NO_DEBUG
)
24664 info
->push_p
= non_fixed_size
> (TARGET_32BIT
? 220 : 288);
24670 debug_stack_info (rs6000_stack_t
*info
)
24672 const char *abi_string
;
24675 info
= rs6000_stack_info ();
24677 fprintf (stderr
, "\nStack information for function %s:\n",
24678 ((current_function_decl
&& DECL_NAME (current_function_decl
))
24679 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl
))
24684 default: abi_string
= "Unknown"; break;
24685 case ABI_NONE
: abi_string
= "NONE"; break;
24686 case ABI_AIX
: abi_string
= "AIX"; break;
24687 case ABI_ELFv2
: abi_string
= "ELFv2"; break;
24688 case ABI_DARWIN
: abi_string
= "Darwin"; break;
24689 case ABI_V4
: abi_string
= "V.4"; break;
24692 fprintf (stderr
, "\tABI = %5s\n", abi_string
);
24694 if (TARGET_ALTIVEC_ABI
)
24695 fprintf (stderr
, "\tALTIVEC ABI extensions enabled.\n");
24697 if (info
->first_gp_reg_save
!= 32)
24698 fprintf (stderr
, "\tfirst_gp_reg_save = %5d\n", info
->first_gp_reg_save
);
24700 if (info
->first_fp_reg_save
!= 64)
24701 fprintf (stderr
, "\tfirst_fp_reg_save = %5d\n", info
->first_fp_reg_save
);
24703 if (info
->first_altivec_reg_save
<= LAST_ALTIVEC_REGNO
)
24704 fprintf (stderr
, "\tfirst_altivec_reg_save = %5d\n",
24705 info
->first_altivec_reg_save
);
24707 if (info
->lr_save_p
)
24708 fprintf (stderr
, "\tlr_save_p = %5d\n", info
->lr_save_p
);
24710 if (info
->cr_save_p
)
24711 fprintf (stderr
, "\tcr_save_p = %5d\n", info
->cr_save_p
);
24713 if (info
->vrsave_mask
)
24714 fprintf (stderr
, "\tvrsave_mask = 0x%x\n", info
->vrsave_mask
);
24717 fprintf (stderr
, "\tpush_p = %5d\n", info
->push_p
);
24720 fprintf (stderr
, "\tcalls_p = %5d\n", info
->calls_p
);
24723 fprintf (stderr
, "\tgp_save_offset = %5d\n", info
->gp_save_offset
);
24726 fprintf (stderr
, "\tfp_save_offset = %5d\n", info
->fp_save_offset
);
24728 if (info
->altivec_size
)
24729 fprintf (stderr
, "\taltivec_save_offset = %5d\n",
24730 info
->altivec_save_offset
);
24732 if (info
->vrsave_size
)
24733 fprintf (stderr
, "\tvrsave_save_offset = %5d\n",
24734 info
->vrsave_save_offset
);
24736 if (info
->lr_save_p
)
24737 fprintf (stderr
, "\tlr_save_offset = %5d\n", info
->lr_save_offset
);
24739 if (info
->cr_save_p
)
24740 fprintf (stderr
, "\tcr_save_offset = %5d\n", info
->cr_save_offset
);
24742 if (info
->varargs_save_offset
)
24743 fprintf (stderr
, "\tvarargs_save_offset = %5d\n", info
->varargs_save_offset
);
24745 if (info
->total_size
)
24746 fprintf (stderr
, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
24749 if (info
->vars_size
)
24750 fprintf (stderr
, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
24753 if (info
->parm_size
)
24754 fprintf (stderr
, "\tparm_size = %5d\n", info
->parm_size
);
24756 if (info
->fixed_size
)
24757 fprintf (stderr
, "\tfixed_size = %5d\n", info
->fixed_size
);
24760 fprintf (stderr
, "\tgp_size = %5d\n", info
->gp_size
);
24763 fprintf (stderr
, "\tfp_size = %5d\n", info
->fp_size
);
24765 if (info
->altivec_size
)
24766 fprintf (stderr
, "\taltivec_size = %5d\n", info
->altivec_size
);
24768 if (info
->vrsave_size
)
24769 fprintf (stderr
, "\tvrsave_size = %5d\n", info
->vrsave_size
);
24771 if (info
->altivec_padding_size
)
24772 fprintf (stderr
, "\taltivec_padding_size= %5d\n",
24773 info
->altivec_padding_size
);
24776 fprintf (stderr
, "\tcr_size = %5d\n", info
->cr_size
);
24778 if (info
->save_size
)
24779 fprintf (stderr
, "\tsave_size = %5d\n", info
->save_size
);
24781 if (info
->reg_size
!= 4)
24782 fprintf (stderr
, "\treg_size = %5d\n", info
->reg_size
);
24784 fprintf (stderr
, "\tsave-strategy = %04x\n", info
->savres_strategy
);
24786 fprintf (stderr
, "\n");
24790 rs6000_return_addr (int count
, rtx frame
)
24792 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24793 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24795 || ((DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
) && flag_pic
))
24797 cfun
->machine
->ra_needs_full_frame
= 1;
24800 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24801 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24802 frame
= stack_pointer_rtx
;
24803 rtx prev_frame_addr
= memory_address (Pmode
, frame
);
24804 rtx prev_frame
= copy_to_reg (gen_rtx_MEM (Pmode
, prev_frame_addr
));
24805 rtx lr_save_off
= plus_constant (Pmode
,
24806 prev_frame
, RETURN_ADDRESS_OFFSET
);
24807 rtx lr_save_addr
= memory_address (Pmode
, lr_save_off
);
24808 return gen_rtx_MEM (Pmode
, lr_save_addr
);
24811 cfun
->machine
->ra_need_lr
= 1;
24812 return get_hard_reg_initial_val (Pmode
, LR_REGNO
);
24815 /* Say whether a function is a candidate for sibcall handling or not. */
24818 rs6000_function_ok_for_sibcall (tree decl
, tree exp
)
24822 /* The sibcall epilogue may clobber the static chain register.
24823 ??? We could work harder and avoid that, but it's probably
24824 not worth the hassle in practice. */
24825 if (CALL_EXPR_STATIC_CHAIN (exp
))
24829 fntype
= TREE_TYPE (decl
);
24831 fntype
= TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp
)));
24833 /* We can't do it if the called function has more vector parameters
24834 than the current function; there's nowhere to put the VRsave code. */
24835 if (TARGET_ALTIVEC_ABI
24836 && TARGET_ALTIVEC_VRSAVE
24837 && !(decl
&& decl
== current_function_decl
))
24839 function_args_iterator args_iter
;
      /* Functions with vector parameters are required to have a
	 prototype, so the argument type info must be available
	 here.  */
24846 FOREACH_FUNCTION_ARGS(fntype
, type
, args_iter
)
24847 if (TREE_CODE (type
) == VECTOR_TYPE
24848 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
24851 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl
), type
, args_iter
)
24852 if (TREE_CODE (type
) == VECTOR_TYPE
24853 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
24860 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24861 functions, because the callee may have a different TOC pointer to
24862 the caller and there's no way to ensure we restore the TOC when
24863 we return. With the secure-plt SYSV ABI we can't make non-local
24864 calls when -fpic/PIC because the plt call stubs use r30. */
24865 if (DEFAULT_ABI
== ABI_DARWIN
24866 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
24868 && !DECL_EXTERNAL (decl
)
24869 && !DECL_WEAK (decl
)
24870 && (*targetm
.binds_local_p
) (decl
))
24871 || (DEFAULT_ABI
== ABI_V4
24872 && (!TARGET_SECURE_PLT
24875 && (*targetm
.binds_local_p
) (decl
)))))
24877 tree attr_list
= TYPE_ATTRIBUTES (fntype
);
24879 if (!lookup_attribute ("longcall", attr_list
)
24880 || lookup_attribute ("shortcall", attr_list
))
24888 rs6000_ra_ever_killed (void)
24894 if (cfun
->is_thunk
)
24897 if (cfun
->machine
->lr_save_state
)
24898 return cfun
->machine
->lr_save_state
- 1;
24900 /* regs_ever_live has LR marked as used if any sibcalls are present,
24901 but this should not force saving and restoring in the
24902 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24903 clobbers LR, so that is inappropriate. */
24905 /* Also, the prologue can generate a store into LR that
24906 doesn't really count, like this:
24909 bcl to set PIC register
24913 When we're called from the epilogue, we need to avoid counting
24914 this as a store. */
24916 push_topmost_sequence ();
24917 top
= get_insns ();
24918 pop_topmost_sequence ();
24919 reg
= gen_rtx_REG (Pmode
, LR_REGNO
);
24921 for (insn
= NEXT_INSN (top
); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
24927 if (!SIBLING_CALL_P (insn
))
24930 else if (find_regno_note (insn
, REG_INC
, LR_REGNO
))
24932 else if (set_of (reg
, insn
) != NULL_RTX
24933 && !prologue_epilogue_contains (insn
))
24940 /* Emit instructions needed to load the TOC register.
24941 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
24942 a constant pool; or for SVR4 -fpic. */
24945 rs6000_emit_load_toc_table (int fromprolog
)
24948 dest
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
24950 if (TARGET_ELF
&& TARGET_SECURE_PLT
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
)
24953 rtx lab
, tmp1
, tmp2
, got
;
24955 lab
= gen_label_rtx ();
24956 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (lab
));
24957 lab
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
24960 got
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
24964 got
= rs6000_got_sym ();
24965 tmp1
= tmp2
= dest
;
24968 tmp1
= gen_reg_rtx (Pmode
);
24969 tmp2
= gen_reg_rtx (Pmode
);
24971 emit_insn (gen_load_toc_v4_PIC_1 (lab
));
24972 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
24973 emit_insn (gen_load_toc_v4_PIC_3b (tmp2
, tmp1
, got
, lab
));
24974 emit_insn (gen_load_toc_v4_PIC_3c (dest
, tmp2
, got
, lab
));
24976 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
24978 emit_insn (gen_load_toc_v4_pic_si ());
24979 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
24981 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
24984 rtx temp0
= (fromprolog
24985 ? gen_rtx_REG (Pmode
, 0)
24986 : gen_reg_rtx (Pmode
));
24992 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
24993 symF
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
24995 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
24996 symL
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
24998 emit_insn (gen_load_toc_v4_PIC_1 (symF
));
24999 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25000 emit_insn (gen_load_toc_v4_PIC_2 (temp0
, dest
, symL
, symF
));
25006 tocsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25008 lab
= gen_label_rtx ();
25009 emit_insn (gen_load_toc_v4_PIC_1b (tocsym
, lab
));
25010 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25011 if (TARGET_LINK_STACK
)
25012 emit_insn (gen_addsi3 (dest
, dest
, GEN_INT (4)));
25013 emit_move_insn (temp0
, gen_rtx_MEM (Pmode
, dest
));
25015 emit_insn (gen_addsi3 (dest
, temp0
, dest
));
25017 else if (TARGET_ELF
&& !TARGET_AIX
&& flag_pic
== 0 && TARGET_MINIMAL_TOC
)
25019 /* This is for AIX code running in non-PIC ELF32. */
25020 rtx realsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25023 emit_insn (gen_elf_high (dest
, realsym
));
25024 emit_insn (gen_elf_low (dest
, dest
, realsym
));
25028 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
25031 emit_insn (gen_load_toc_aix_si (dest
));
25033 emit_insn (gen_load_toc_aix_di (dest
));
25037 /* Emit instructions to restore the link register after determining where
25038 its value has been stored. */
25041 rs6000_emit_eh_reg_restore (rtx source
, rtx scratch
)
25043 rs6000_stack_t
*info
= rs6000_stack_info ();
25046 operands
[0] = source
;
25047 operands
[1] = scratch
;
25049 if (info
->lr_save_p
)
25051 rtx frame_rtx
= stack_pointer_rtx
;
25052 HOST_WIDE_INT sp_offset
= 0;
25055 if (frame_pointer_needed
25056 || cfun
->calls_alloca
25057 || info
->total_size
> 32767)
25059 tmp
= gen_frame_mem (Pmode
, frame_rtx
);
25060 emit_move_insn (operands
[1], tmp
);
25061 frame_rtx
= operands
[1];
25063 else if (info
->push_p
)
25064 sp_offset
= info
->total_size
;
25066 tmp
= plus_constant (Pmode
, frame_rtx
,
25067 info
->lr_save_offset
+ sp_offset
);
25068 tmp
= gen_frame_mem (Pmode
, tmp
);
25069 emit_move_insn (tmp
, operands
[0]);
25072 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNO
), operands
[0]);
25074 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25075 state of lr_save_p so any change from here on would be a bug. In
25076 particular, stop rs6000_ra_ever_killed from considering the SET
25077 of lr we may have added just above. */
25078 cfun
->machine
->lr_save_state
= info
->lr_save_p
+ 1;
25081 static GTY(()) alias_set_type set
= -1;
25084 get_TOC_alias_set (void)
25087 set
= new_alias_set ();
25091 /* This returns nonzero if the current function uses the TOC. This is
25092 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25093 is generated by the ABI_V4 load_toc_* patterns.
25094 Return 2 instead of 1 if the load_toc_* pattern is in the function
25095 partition that doesn't start the function. */
25103 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
25107 rtx pat
= PATTERN (insn
);
25110 if (GET_CODE (pat
) == PARALLEL
)
25111 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
25113 rtx sub
= XVECEXP (pat
, 0, i
);
25114 if (GET_CODE (sub
) == USE
)
25116 sub
= XEXP (sub
, 0);
25117 if (GET_CODE (sub
) == UNSPEC
25118 && XINT (sub
, 1) == UNSPEC_TOC
)
25123 else if (crtl
->has_bb_partition
25125 && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
25133 create_TOC_reference (rtx symbol
, rtx largetoc_reg
)
25135 rtx tocrel
, tocreg
, hi
;
25137 if (TARGET_DEBUG_ADDR
)
25139 if (GET_CODE (symbol
) == SYMBOL_REF
)
25140 fprintf (stderr
, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25144 fprintf (stderr
, "\ncreate_TOC_reference, code %s:\n",
25145 GET_RTX_NAME (GET_CODE (symbol
)));
25146 debug_rtx (symbol
);
25150 if (!can_create_pseudo_p ())
25151 df_set_regs_ever_live (TOC_REGISTER
, true);
25153 tocreg
= gen_rtx_REG (Pmode
, TOC_REGISTER
);
25154 tocrel
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, symbol
, tocreg
), UNSPEC_TOCREL
);
25155 if (TARGET_CMODEL
== CMODEL_SMALL
|| can_create_pseudo_p ())
25158 hi
= gen_rtx_HIGH (Pmode
, copy_rtx (tocrel
));
25159 if (largetoc_reg
!= NULL
)
25161 emit_move_insn (largetoc_reg
, hi
);
25164 return gen_rtx_LO_SUM (Pmode
, hi
, tocrel
);
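/* Illustrative note (not part of the original source): with -mcmodel=small
   the UNSPEC_TOCREL built above is used directly as a 16-bit offset from
   the TOC pointer (r2), while for the medium/large code models the
   HIGH/LO_SUM pair typically becomes an addis @toc@ha plus @toc@l
   instruction pair.  */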
25167 /* Issue assembly directives that create a reference to the given DWARF
25168 FRAME_TABLE_LABEL from the current function section. */
25170 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label
)
25172 fprintf (asm_out_file
, "\t.ref %s\n",
25173 (* targetm
.strip_name_encoding
) (frame_table_label
));
25176 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25177 and the change to the stack pointer. */
25180 rs6000_emit_stack_tie (rtx fp
, bool hard_frame_needed
)
25187 regs
[i
++] = gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
25188 if (hard_frame_needed
)
25189 regs
[i
++] = gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
);
25190 if (!(REGNO (fp
) == STACK_POINTER_REGNUM
25191 || (hard_frame_needed
25192 && REGNO (fp
) == HARD_FRAME_POINTER_REGNUM
)))
25195 p
= rtvec_alloc (i
);
25198 rtx mem
= gen_frame_mem (BLKmode
, regs
[i
]);
25199 RTVEC_ELT (p
, i
) = gen_rtx_SET (mem
, const0_rtx
);
25202 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode
, p
)));
25205 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25206 and set the appropriate attributes for the generated insn. Return the
25207 first insn which adjusts the stack pointer or the last insn before
25208 the stack adjustment loop.
25210 SIZE_INT is used to create the CFI note for the allocation.
25212 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25213 since stacks grow to lower addresses its runtime value is -SIZE_INT.
25215 ORIG_SP contains the backchain value that must be stored at *sp. */
25218 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int
, rtx orig_sp
)
25222 rtx size_rtx
= GEN_INT (-size_int
);
25223 if (size_int
> 32767)
25225 rtx tmp_reg
= gen_rtx_REG (Pmode
, 0);
25226 /* Need a note here so that try_split doesn't get confused. */
25227 if (get_last_insn () == NULL_RTX
)
25228 emit_note (NOTE_INSN_DELETED
);
25229 insn
= emit_move_insn (tmp_reg
, size_rtx
);
25230 try_split (PATTERN (insn
), insn
, 0);
25231 size_rtx
= tmp_reg
;
25234 if (Pmode
== SImode
)
25235 insn
= emit_insn (gen_movsi_update_stack (stack_pointer_rtx
,
25240 insn
= emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx
,
25244 rtx par
= PATTERN (insn
);
25245 gcc_assert (GET_CODE (par
) == PARALLEL
);
25246 rtx set
= XVECEXP (par
, 0, 0);
25247 gcc_assert (GET_CODE (set
) == SET
);
25248 rtx mem
= SET_DEST (set
);
25249 gcc_assert (MEM_P (mem
));
25250 MEM_NOTRAP_P (mem
) = 1;
25251 set_mem_alias_set (mem
, get_frame_alias_set ());
25253 RTX_FRAME_RELATED_P (insn
) = 1;
25254 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
25255 gen_rtx_SET (stack_pointer_rtx
,
25256 gen_rtx_PLUS (Pmode
,
25258 GEN_INT (-size_int
))));
25260 /* Emit a blockage to ensure the allocation/probing insns are
25261 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25262 note for similar reasons. */
25263 if (flag_stack_clash_protection
)
25265 add_reg_note (insn
, REG_STACK_CHECK
, const0_rtx
);
25266 emit_insn (gen_blockage ());
25272 static HOST_WIDE_INT
25273 get_stack_clash_protection_probe_interval (void)
25275 return (HOST_WIDE_INT_1U
25276 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL
));
25279 static HOST_WIDE_INT
25280 get_stack_clash_protection_guard_size (void)
25282 return (HOST_WIDE_INT_1U
25283 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE
));
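/* Example (illustrative): both helpers above return a power of two, so a
   --param value of 12 corresponds to 1 << 12 = 4096 bytes, i.e. probing at
   least once per 4 KiB page of newly allocated stack.  */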
25286 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25287 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25289 COPY_REG, if non-null, should contain a copy of the original
25290 stack pointer at exit from this function.
25292 This is subtly different than the Ada probing in that it tries hard to
25293 prevent attacks that jump the stack guard. Thus it is never allowed to
25294 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25295 space without a suitable probe. */
25297 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size
,
25300 rtx orig_sp
= copy_reg
;
25302 HOST_WIDE_INT probe_interval
= get_stack_clash_protection_probe_interval ();
25304 /* Round the size down to a multiple of PROBE_INTERVAL. */
25305 HOST_WIDE_INT rounded_size
= ROUND_DOWN (orig_size
, probe_interval
);
  /* If explicitly requested,
     or the rounded size is not the same as the original size
     or the rounded size is greater than a page,
     then we will need a copy of the original stack pointer.  */
25311 if (rounded_size
!= orig_size
25312 || rounded_size
> probe_interval
25315 /* If the caller did not request a copy of the incoming stack
25316 pointer, then we use r0 to hold the copy. */
25318 orig_sp
= gen_rtx_REG (Pmode
, 0);
25319 emit_move_insn (orig_sp
, stack_pointer_rtx
);
  /* There are three cases here.

     One is a single probe which is the most common and most efficiently
     implemented as it does not have to have a copy of the original
     stack pointer if there are no residuals.

     Second is unrolled allocation/probes which we use if there's just
     a few of them.  It needs to save the original stack pointer into a
     temporary for use as a source register in the allocation/probe.

     Last is a loop.  This is the most uncommon case and least efficient.  */
25333 rtx_insn
*retval
= NULL
;
25334 if (rounded_size
== probe_interval
)
25336 retval
= rs6000_emit_allocate_stack_1 (probe_interval
, stack_pointer_rtx
);
25338 dump_stack_clash_frame_info (PROBE_INLINE
, rounded_size
!= orig_size
);
25340 else if (rounded_size
<= 8 * probe_interval
)
      /* The ABI requires using the store with update insns to allocate
	 space and store the backchain into the stack.

	 So we save the current stack pointer into a temporary, then
	 emit the store-with-update insns to store the saved stack pointer
	 into the right location in each new page.  */
25348 for (int i
= 0; i
< rounded_size
; i
+= probe_interval
)
25351 = rs6000_emit_allocate_stack_1 (probe_interval
, orig_sp
);
25353 /* Save the first stack adjustment in RETVAL. */
25358 dump_stack_clash_frame_info (PROBE_INLINE
, rounded_size
!= orig_size
);
25362 /* Compute the ending address. */
25364 = copy_reg
? gen_rtx_REG (Pmode
, 0) : gen_rtx_REG (Pmode
, 12);
25365 rtx rs
= GEN_INT (-rounded_size
);
25367 if (add_operand (rs
, Pmode
))
25368 insn
= emit_insn (gen_add3_insn (end_addr
, stack_pointer_rtx
, rs
));
25371 emit_move_insn (end_addr
, GEN_INT (-rounded_size
));
25372 insn
= emit_insn (gen_add3_insn (end_addr
, end_addr
,
25373 stack_pointer_rtx
));
25374 /* Describe the effect of INSN to the CFI engine. */
25375 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
25376 gen_rtx_SET (end_addr
,
25377 gen_rtx_PLUS (Pmode
, stack_pointer_rtx
,
25380 RTX_FRAME_RELATED_P (insn
) = 1;
25382 /* Emit the loop. */
25384 retval
= emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx
,
25385 stack_pointer_rtx
, orig_sp
,
25388 retval
= emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx
,
25389 stack_pointer_rtx
, orig_sp
,
25391 RTX_FRAME_RELATED_P (retval
) = 1;
25392 /* Describe the effect of INSN to the CFI engine. */
25393 add_reg_note (retval
, REG_FRAME_RELATED_EXPR
,
25394 gen_rtx_SET (stack_pointer_rtx
, end_addr
));
25396 /* Emit a blockage to ensure the allocation/probing insns are
25397 not optimized, combined, removed, etc. Other cases handle this
25398 within their call to rs6000_emit_allocate_stack_1. */
25399 emit_insn (gen_blockage ());
25401 dump_stack_clash_frame_info (PROBE_LOOP
, rounded_size
!= orig_size
);
25404 if (orig_size
!= rounded_size
)
25406 /* Allocate (and implicitly probe) any residual space. */
25407 HOST_WIDE_INT residual
= orig_size
- rounded_size
;
25409 rtx_insn
*insn
= rs6000_emit_allocate_stack_1 (residual
, orig_sp
);
25411 /* If the residual was the only allocation, then we can return the
25412 allocating insn. */
25420 /* Emit the correct code for allocating stack space, as insns.
25421 If COPY_REG, make sure a copy of the old frame is left there.
25422 The generated code may use hard register 0 as a temporary. */
25425 rs6000_emit_allocate_stack (HOST_WIDE_INT size
, rtx copy_reg
, int copy_off
)
25428 rtx stack_reg
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
25429 rtx tmp_reg
= gen_rtx_REG (Pmode
, 0);
25430 rtx todec
= gen_int_mode (-size
, Pmode
);
25432 if (INTVAL (todec
) != -size
)
25434 warning (0, "stack frame too large");
25435 emit_insn (gen_trap ());
25439 if (crtl
->limit_stack
)
25441 if (REG_P (stack_limit_rtx
)
25442 && REGNO (stack_limit_rtx
) > 1
25443 && REGNO (stack_limit_rtx
) <= 31)
25446 = gen_add3_insn (tmp_reg
, stack_limit_rtx
, GEN_INT (size
));
25449 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
, const0_rtx
));
25451 else if (GET_CODE (stack_limit_rtx
) == SYMBOL_REF
25453 && DEFAULT_ABI
== ABI_V4
25456 rtx toload
= gen_rtx_CONST (VOIDmode
,
25457 gen_rtx_PLUS (Pmode
,
25461 emit_insn (gen_elf_high (tmp_reg
, toload
));
25462 emit_insn (gen_elf_low (tmp_reg
, tmp_reg
, toload
));
25463 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
,
25467 warning (0, "stack limit expression is not supported");
25470 if (flag_stack_clash_protection
)
25472 if (size
< get_stack_clash_protection_guard_size ())
25473 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME
, true);
25476 rtx_insn
*insn
= rs6000_emit_probe_stack_range_stack_clash (size
,
      /* If we asked for a copy with an offset, then we still need to add in
	 the offset.  */
25481 if (copy_reg
&& copy_off
)
25482 emit_insn (gen_add3_insn (copy_reg
, copy_reg
, GEN_INT (copy_off
)));
25490 emit_insn (gen_add3_insn (copy_reg
, stack_reg
, GEN_INT (copy_off
)));
25492 emit_move_insn (copy_reg
, stack_reg
);
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
25499 insn
= rs6000_emit_allocate_stack_1 (size
, stack_reg
);
25503 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25505 #if PROBE_INTERVAL > 32768
25506 #error Cannot use indexed addressing mode for stack probing
25509 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25510 inclusive. These are offsets from the current stack pointer. */
25513 rs6000_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
25515 /* See if we have a constant small number of probes to generate. If so,
25516 that's the easy case. */
25517 if (first
+ size
<= 32768)
25521 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25522 it exceeds SIZE. If only one probe is needed, this will not
25523 generate any code. Then probe at FIRST + SIZE. */
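      /* For example, with PROBE_INTERVAL == 4096, FIRST == 16384 and
	 SIZE == 10000, the loop below emits probes at the following
	 offsets from the stack pointer (a hypothetical walk-through,
	 not generated code):

	   for (i = 4096; i < 10000; i += 4096)
	     probe at -(16384 + i);       => -20480, -24576
	   probe at -(16384 + 10000);     => -26384

	 so no page in the FIRST..FIRST+SIZE window is skipped.  */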
25524 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
25525 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
25528 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
25532 /* Otherwise, do the same as above, but in a loop. Note that we must be
25533 extra careful with variables wrapping around because we might be at
25534 the very top (or the very bottom) of the address space and we have
25535 to be able to handle this case properly; in particular, we use an
25536 equality test for the loop condition. */
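  /* Illustrative note: on a 32-bit target an address such as 0xffffff00
     plus 0x200 wraps around to 0x100, so an ordered comparison on the
     probe address could terminate early or never; comparing for equality
     against a precomputed LAST_ADDR is immune to that wrap-around.  */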
25539 HOST_WIDE_INT rounded_size
;
25540 rtx r12
= gen_rtx_REG (Pmode
, 12);
25541 rtx r0
= gen_rtx_REG (Pmode
, 0);
25543 /* Sanity check for the addressing mode we're going to use. */
25544 gcc_assert (first
<= 32768);
25546 /* Step 1: round SIZE to the previous multiple of the interval. */
25548 rounded_size
= ROUND_DOWN (size
, PROBE_INTERVAL
);
25551 /* Step 2: compute initial and final value of the loop counter. */
25553 /* TEST_ADDR = SP + FIRST. */
25554 emit_insn (gen_rtx_SET (r12
, plus_constant (Pmode
, stack_pointer_rtx
,
25557 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25558 if (rounded_size
> 32768)
25560 emit_move_insn (r0
, GEN_INT (-rounded_size
));
25561 emit_insn (gen_rtx_SET (r0
, gen_rtx_PLUS (Pmode
, r12
, r0
)));
25564 emit_insn (gen_rtx_SET (r0
, plus_constant (Pmode
, r12
,
      /* Step 3: the loop

	   do
	     {
	       TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	       probe at TEST_ADDR
	     }
	   while (TEST_ADDR != LAST_ADDR)

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */
25581 emit_insn (gen_probe_stack_rangedi (r12
, r12
, stack_pointer_rtx
, r0
));
25583 emit_insn (gen_probe_stack_rangesi (r12
, r12
, stack_pointer_rtx
, r0
));
25586 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25587 that SIZE is equal to ROUNDED_SIZE. */
25589 if (size
!= rounded_size
)
25590 emit_stack_probe (plus_constant (Pmode
, r12
, rounded_size
- size
));
25594 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25595 addresses, not offsets. */
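/* The routine below emits a small assembly loop of roughly the following
   shape (32-bit shown; the 64-bit variant uses cmpd instead of cmpw, and
   reg1/reg2 stand for the operands passed in):

     LPSRL0:
	addi reg1,reg1,-PROBE_INTERVAL
	stw  r0,0(reg1)
	cmpw 0,reg1,reg2
	bne  0,LPSRL0
   */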
25597 static const char *
25598 output_probe_stack_range_1 (rtx reg1
, rtx reg2
)
25600 static int labelno
= 0;
25604 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
25607 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
25609 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25611 xops
[1] = GEN_INT (-PROBE_INTERVAL
);
25612 output_asm_insn ("addi %0,%0,%1", xops
);
25614 /* Probe at TEST_ADDR. */
25615 xops
[1] = gen_rtx_REG (Pmode
, 0);
25616 output_asm_insn ("stw %1,0(%0)", xops
);
25618 /* Test if TEST_ADDR == LAST_ADDR. */
25621 output_asm_insn ("cmpd 0,%0,%1", xops
);
25623 output_asm_insn ("cmpw 0,%0,%1", xops
);
25626 fputs ("\tbne 0,", asm_out_file
);
25627 assemble_name_raw (asm_out_file
, loop_lab
);
25628 fputc ('\n', asm_out_file
);
/* This function is called when rs6000_frame_related is processing
   SETs within a PARALLEL, and returns whether the REGNO save ought to
   be marked RTX_FRAME_RELATED_P.  The PARALLELs involved are those
   for out-of-line register save functions, store multiple, and the
   Darwin world_save.  They may contain registers that don't really
   need saving.  */

static bool
interesting_frame_related_regno (unsigned int regno)
{
  /* Saves apparently of r0 are actually saving LR.  It doesn't make
     sense to substitute the regno here to test save_reg_p (LR_REGNO).
     We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
     (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
     as frame related.  */
  if (regno == 0)
    return true;

  /* If we see CR2 then we are here on a Darwin world save.  Saves of
     CR2 signify the whole CR is being saved.  This is a long-standing
     ABI wart fixed by ELFv2.  As for r0/lr there is no need to check
     that CR needs to be saved.  */
  if (regno == CR2_REGNO)
    return true;

  /* Omit frame info for any user-defined global regs.  If frame info
     is supplied for them, frame unwinding will restore a user reg.
     Also omit frame info for any reg we don't need to save, as that
     bloats frame info and can cause problems with shrink wrapping.
     Since global regs won't be seen as needing to be saved, both of
     these conditions are covered by save_reg_p.  */
  return save_reg_p (regno);
}
/* Probe a range of stack addresses from REG1 to REG3 inclusive.  These are
   addresses, not offsets.

   REG2 contains the backchain that must be stored into *sp at each allocation.

   This is subtly different from the Ada probing above in that it tries hard
   to prevent attacks that jump the stack guard.  Thus, it is never allowed
   to allocate more than PROBE_INTERVAL bytes of stack space without a
   probe.  */
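/* The emitted loop looks roughly like (64-bit shown; the 32-bit variant
   uses stwu/cmpw, and reg1..reg3 stand for the operands passed in):

     LPSRL0:
	stdu reg2,-PROBE_INTERVAL(reg1)
	cmpd 0,reg1,reg3
	bne  0,LPSRL0

   i.e. every decrement of the stack pointer is itself the probing store,
   so the guard page can never be jumped over.  */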
25675 static const char *
25676 output_probe_stack_range_stack_clash (rtx reg1
, rtx reg2
, rtx reg3
)
25678 static int labelno
= 0;
25682 HOST_WIDE_INT probe_interval
= get_stack_clash_protection_probe_interval ();
25684 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
25686 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
25688 /* This allocates and probes. */
25691 xops
[2] = GEN_INT (-probe_interval
);
25693 output_asm_insn ("stdu %1,%2(%0)", xops
);
25695 output_asm_insn ("stwu %1,%2(%0)", xops
);
25697 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25701 output_asm_insn ("cmpd 0,%0,%1", xops
);
25703 output_asm_insn ("cmpw 0,%0,%1", xops
);
25705 fputs ("\tbne 0,", asm_out_file
);
25706 assemble_name_raw (asm_out_file
, loop_lab
);
25707 fputc ('\n', asm_out_file
);
/* Wrapper around the output_probe_stack_range routines.  */
const char *
output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
{
  if (flag_stack_clash_protection)
    return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
  else
    return output_probe_stack_range_1 (reg1, reg3);
}
25722 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25723 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25724 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25725 deduce these equivalences by itself so it wasn't necessary to hold
25726 its hand so much. Don't be tempted to always supply d2_f_d_e with
25727 the actual cfa register, ie. r31 when we are using a hard frame
25728 pointer. That fails when saving regs off r1, and sched moves the
25729 r31 setup past the reg saves. */
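/* A hypothetical example of the effect: if the prologue stored r31 through
   a frame pointer copy held in r11, and VAL is -64 (the offset from r11
   back to the incoming r1), the attached REG_FRAME_RELATED_EXPR note
   rewrites

     (set (mem:DI (plus:DI (reg 11) (const_int 8))) (reg:DI 31))

   into the r1-relative form

     (set (mem:DI (plus:DI (reg 1) (const_int -56))) (reg:DI 31))

   which is what the CFI machinery can translate into a CFA-relative save.  */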
25732 rs6000_frame_related (rtx_insn
*insn
, rtx reg
, HOST_WIDE_INT val
,
25733 rtx reg2
, rtx repl2
)
25737 if (REGNO (reg
) == STACK_POINTER_REGNUM
)
25739 gcc_checking_assert (val
== 0);
25743 repl
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
25746 rtx pat
= PATTERN (insn
);
25747 if (!repl
&& !reg2
)
25749 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25750 if (GET_CODE (pat
) == PARALLEL
)
25751 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
25752 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
25754 rtx set
= XVECEXP (pat
, 0, i
);
25756 if (!REG_P (SET_SRC (set
))
25757 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
25758 RTX_FRAME_RELATED_P (set
) = 1;
25760 RTX_FRAME_RELATED_P (insn
) = 1;
25764 /* We expect that 'pat' is either a SET or a PARALLEL containing
25765 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25766 are important so they all have to be marked RTX_FRAME_RELATED_P.
25767 Call simplify_replace_rtx on the SETs rather than the whole insn
25768 so as to leave the other stuff alone (for example USE of r12). */
25770 set_used_flags (pat
);
25771 if (GET_CODE (pat
) == SET
)
25774 pat
= simplify_replace_rtx (pat
, reg
, repl
);
25776 pat
= simplify_replace_rtx (pat
, reg2
, repl2
);
25778 else if (GET_CODE (pat
) == PARALLEL
)
25780 pat
= shallow_copy_rtx (pat
);
25781 XVEC (pat
, 0) = shallow_copy_rtvec (XVEC (pat
, 0));
25783 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
25784 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
25786 rtx set
= XVECEXP (pat
, 0, i
);
25789 set
= simplify_replace_rtx (set
, reg
, repl
);
25791 set
= simplify_replace_rtx (set
, reg2
, repl2
);
25792 XVECEXP (pat
, 0, i
) = set
;
25794 if (!REG_P (SET_SRC (set
))
25795 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
25796 RTX_FRAME_RELATED_P (set
) = 1;
25800 gcc_unreachable ();
25802 RTX_FRAME_RELATED_P (insn
) = 1;
25803 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, copy_rtx_if_shared (pat
));
25808 /* Returns an insn that has a vrsave set operation with the
25809 appropriate CLOBBERs. */
25812 generate_set_vrsave (rtx reg
, rs6000_stack_t
*info
, int epiloguep
)
25815 rtx insn
, clobs
[TOTAL_ALTIVEC_REGS
+ 1];
25816 rtx vrsave
= gen_rtx_REG (SImode
, VRSAVE_REGNO
);
25819 = gen_rtx_SET (vrsave
,
25820 gen_rtx_UNSPEC_VOLATILE (SImode
,
25821 gen_rtvec (2, reg
, vrsave
),
25822 UNSPECV_SET_VRSAVE
));
25826 /* We need to clobber the registers in the mask so the scheduler
25827 does not move sets to VRSAVE before sets of AltiVec registers.
25829 However, if the function receives nonlocal gotos, reload will set
25830 all call saved registers live. We will end up with:
25832 (set (reg 999) (mem))
25833 (parallel [ (set (reg vrsave) (unspec blah))
25834 (clobber (reg 999))])
25836 The clobber will cause the store into reg 999 to be dead, and
25837 flow will attempt to delete an epilogue insn. In this case, we
25838 need an unspec use/set of the register. */
25840 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
25841 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
25843 if (!epiloguep
|| call_used_regs
[i
])
25844 clobs
[nclobs
++] = gen_rtx_CLOBBER (VOIDmode
,
25845 gen_rtx_REG (V4SImode
, i
));
25848 rtx reg
= gen_rtx_REG (V4SImode
, i
);
25851 = gen_rtx_SET (reg
,
25852 gen_rtx_UNSPEC (V4SImode
,
25853 gen_rtvec (1, reg
), 27));
25857 insn
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (nclobs
));
25859 for (i
= 0; i
< nclobs
; ++i
)
25860 XVECEXP (insn
, 0, i
) = clobs
[i
];
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
		       || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}
25915 #ifndef TARGET_FIX_AND_CONTINUE
25916 #define TARGET_FIX_AND_CONTINUE 0
25919 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25920 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25921 #define LAST_SAVRES_REGISTER 31
25922 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25933 static GTY(()) rtx savres_routine_syms
[N_SAVRES_REGISTERS
][12];
/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
25939 /* Return the name for an out-of-line register save/restore routine.
25940 We are saving/restoring GPRs if GPR is true. */
25943 rs6000_savres_routine_name (int regno
, int sel
)
25945 const char *prefix
= "";
25946 const char *suffix
= "";
25948 /* Different targets are supposed to define
25949 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25950 routine name could be defined with:
25952 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25954 This is a nice idea in practice, but in reality, things are
25955 complicated in several ways:
25957 - ELF targets have save/restore routines for GPRs.
25959 - PPC64 ELF targets have routines for save/restore of GPRs that
25960 differ in what they do with the link register, so having a set
25961 prefix doesn't work. (We only use one of the save routines at
25962 the moment, though.)
25964 - PPC32 elf targets have "exit" versions of the restore routines
25965 that restore the link register and can save some extra space.
25966 These require an extra suffix. (There are also "tail" versions
25967 of the restore routines and "GOT" versions of the save routines,
     but we don't generate those at present.  Same problems apply,
     though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
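  /* For example, the sprintf below yields names such as (register numbers
     are illustrative):

       ELFv2/AIX, save GPRs from r29, routine also saves LR:  "_savegpr0_29"
       ELFv2/AIX, restore GPRs from r29, plain variant:       "_restgpr1_29"
       32-bit SVR4, save FPRs from f14:                       "_savefpr_14"
       32-bit SVR4, "exit" restore of GPRs from r29:          "_restgpr_29_x"

     i.e. the prefix selects the register class and the LR handling, the
     number is the first register handled, and any suffix is ABI-specific.  */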
25973 if (DEFAULT_ABI
== ABI_V4
)
25978 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
25979 prefix
= (sel
& SAVRES_SAVE
) ? "_savegpr_" : "_restgpr_";
25980 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
25981 prefix
= (sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_";
25982 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
25983 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
25987 if ((sel
& SAVRES_LR
))
25990 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25992 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25993 /* No out-of-line save/restore routines for GPRs on AIX. */
25994 gcc_assert (!TARGET_AIX
|| (sel
& SAVRES_REG
) != SAVRES_GPR
);
25998 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
25999 prefix
= ((sel
& SAVRES_SAVE
)
26000 ? ((sel
& SAVRES_LR
) ? "_savegpr0_" : "_savegpr1_")
26001 : ((sel
& SAVRES_LR
) ? "_restgpr0_" : "_restgpr1_"));
26002 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
26004 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26005 if ((sel
& SAVRES_LR
))
26006 prefix
= ((sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_");
26010 prefix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_PREFIX
: RESTORE_FP_PREFIX
;
26011 suffix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_SUFFIX
: RESTORE_FP_SUFFIX
;
26014 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26015 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
26020 if (DEFAULT_ABI
== ABI_DARWIN
)
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of the save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
26027 prefix
= (sel
& SAVRES_SAVE
) ? "save" : "rest" ;
26028 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
26029 sprintf (savres_routine_name
, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix
,
26030 ((sel
& SAVRES_LR
) ? "x" : ""), (regno
== 13 ? "" : "+"),
26031 (regno
- 13) * 4, prefix
, regno
);
26032 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
26033 sprintf (savres_routine_name
, "*%sFP%s%.0d ; %s f%d-f31", prefix
,
26034 (regno
== 14 ? "" : "+"), (regno
- 14) * 4, prefix
, regno
);
26035 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26036 sprintf (savres_routine_name
, "*%sVEC%s%.0d ; %s v%d-v31", prefix
,
26037 (regno
== 20 ? "" : "+"), (regno
- 20) * 8, prefix
, regno
);
26042 sprintf (savres_routine_name
, "%s%d%s", prefix
, regno
, suffix
);
26044 return savres_routine_name
;
26047 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26048 We are saving/restoring GPRs if GPR is true. */
26051 rs6000_savres_routine_sym (rs6000_stack_t
*info
, int sel
)
26053 int regno
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26054 ? info
->first_gp_reg_save
26055 : (sel
& SAVRES_REG
) == SAVRES_FPR
26056 ? info
->first_fp_reg_save
- 32
26057 : (sel
& SAVRES_REG
) == SAVRES_VR
26058 ? info
->first_altivec_reg_save
- FIRST_ALTIVEC_REGNO
26063 /* Don't generate bogus routine names. */
26064 gcc_assert (FIRST_SAVRES_REGISTER
<= regno
26065 && regno
<= LAST_SAVRES_REGISTER
26066 && select
>= 0 && select
<= 12);
26068 sym
= savres_routine_syms
[regno
-FIRST_SAVRES_REGISTER
][select
];
26074 name
= rs6000_savres_routine_name (regno
, sel
);
26076 sym
= savres_routine_syms
[regno
-FIRST_SAVRES_REGISTER
][select
]
26077 = gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
26078 SYMBOL_REF_FLAGS (sym
) |= SYMBOL_FLAG_FUNCTION
;
26084 /* Emit a sequence of insns, including a stack tie if needed, for
26085 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26086 reset the stack pointer, but move the base of the frame into
26087 reg UPDT_REGNO for use by out-of-line register restore routines. */
26090 rs6000_emit_stack_reset (rtx frame_reg_rtx
, HOST_WIDE_INT frame_off
,
26091 unsigned updt_regno
)
26093 /* If there is nothing to do, don't do anything. */
26094 if (frame_off
== 0 && REGNO (frame_reg_rtx
) == updt_regno
)
26097 rtx updt_reg_rtx
= gen_rtx_REG (Pmode
, updt_regno
);
26099 /* This blockage is needed so that sched doesn't decide to move
26100 the sp change before the register restores. */
26101 if (DEFAULT_ABI
== ABI_V4
)
26102 return emit_insn (gen_stack_restore_tie (updt_reg_rtx
, frame_reg_rtx
,
26103 GEN_INT (frame_off
)));
26105 /* If we are restoring registers out-of-line, we will be using the
26106 "exit" variants of the restore routines, which will reset the
26107 stack for us. But we do need to point updt_reg into the
26108 right place for those routines. */
26109 if (frame_off
!= 0)
26110 return emit_insn (gen_add3_insn (updt_reg_rtx
,
26111 frame_reg_rtx
, GEN_INT (frame_off
)));
26113 return emit_move_insn (updt_reg_rtx
, frame_reg_rtx
);
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
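/* For illustration (the values follow directly from the conditions above):

     sel = SAVRES_SAVE | SAVRES_GPR               -> 12 on AIX/ELFv2, 11 on V4/Darwin
     sel = SAVRES_SAVE | SAVRES_GPR | SAVRES_LR   -> 1 on AIX/ELFv2
     sel = SAVRES_SAVE | SAVRES_FPR               -> 1 on AIX/ELFv2 and Darwin, 11 on V4  */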
26129 /* Construct a parallel rtx describing the effect of a call to an
26130 out-of-line register save/restore routine, and emit the insn
26131 or jump_insn as appropriate. */
26134 rs6000_emit_savres_rtx (rs6000_stack_t
*info
,
26135 rtx frame_reg_rtx
, int save_area_offset
, int lr_offset
,
26136 machine_mode reg_mode
, int sel
)
26139 int offset
, start_reg
, end_reg
, n_regs
, use_reg
;
26140 int reg_size
= GET_MODE_SIZE (reg_mode
);
26147 start_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26148 ? info
->first_gp_reg_save
26149 : (sel
& SAVRES_REG
) == SAVRES_FPR
26150 ? info
->first_fp_reg_save
26151 : (sel
& SAVRES_REG
) == SAVRES_VR
26152 ? info
->first_altivec_reg_save
26154 end_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
26156 : (sel
& SAVRES_REG
) == SAVRES_FPR
26158 : (sel
& SAVRES_REG
) == SAVRES_VR
26159 ? LAST_ALTIVEC_REGNO
+ 1
26161 n_regs
= end_reg
- start_reg
;
26162 p
= rtvec_alloc (3 + ((sel
& SAVRES_LR
) ? 1 : 0)
26163 + ((sel
& SAVRES_REG
) == SAVRES_VR
? 1 : 0)
26166 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26167 RTVEC_ELT (p
, offset
++) = ret_rtx
;
26169 RTVEC_ELT (p
, offset
++)
26170 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
26172 sym
= rs6000_savres_routine_sym (info
, sel
);
26173 RTVEC_ELT (p
, offset
++) = gen_rtx_USE (VOIDmode
, sym
);
26175 use_reg
= ptr_regno_for_savres (sel
);
26176 if ((sel
& SAVRES_REG
) == SAVRES_VR
)
26178 /* Vector regs are saved/restored using [reg+reg] addressing. */
26179 RTVEC_ELT (p
, offset
++)
26180 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
26181 RTVEC_ELT (p
, offset
++)
26182 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, 0));
26185 RTVEC_ELT (p
, offset
++)
26186 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
26188 for (i
= 0; i
< end_reg
- start_reg
; i
++)
26189 RTVEC_ELT (p
, i
+ offset
)
26190 = gen_frame_set (gen_rtx_REG (reg_mode
, start_reg
+ i
),
26191 frame_reg_rtx
, save_area_offset
+ reg_size
* i
,
26192 (sel
& SAVRES_SAVE
) != 0);
26194 if ((sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26195 RTVEC_ELT (p
, i
+ offset
)
26196 = gen_frame_store (gen_rtx_REG (Pmode
, 0), frame_reg_rtx
, lr_offset
);
26198 par
= gen_rtx_PARALLEL (VOIDmode
, p
);
26200 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
26202 insn
= emit_jump_insn (par
);
26203 JUMP_LABEL (insn
) = ret_rtx
;
26206 insn
= emit_insn (par
);
26210 /* Emit prologue code to store CR fields that need to be saved into REG. This
26211 function should only be called when moving the non-volatile CRs to REG, it
26212 is not a general purpose routine to move the entire set of CRs to REG.
   Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
   volatile CRs.  */
26217 rs6000_emit_prologue_move_from_cr (rtx reg
)
26219 /* Only the ELFv2 ABI allows storing only selected fields. */
26220 if (DEFAULT_ABI
== ABI_ELFv2
&& TARGET_MFCRF
)
26222 int i
, cr_reg
[8], count
= 0;
26224 /* Collect CR fields that must be saved. */
26225 for (i
= 0; i
< 8; i
++)
26226 if (save_reg_p (CR0_REGNO
+ i
))
26227 cr_reg
[count
++] = i
;
26229 /* If it's just a single one, use mfcrf. */
26232 rtvec p
= rtvec_alloc (1);
26233 rtvec r
= rtvec_alloc (2);
26234 RTVEC_ELT (r
, 0) = gen_rtx_REG (CCmode
, CR0_REGNO
+ cr_reg
[0]);
26235 RTVEC_ELT (r
, 1) = GEN_INT (1 << (7 - cr_reg
[0]));
26237 = gen_rtx_SET (reg
,
26238 gen_rtx_UNSPEC (SImode
, r
, UNSPEC_MOVESI_FROM_CR
));
26240 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26244 /* ??? It might be better to handle count == 2 / 3 cases here
26245 as well, using logical operations to combine the values. */
26248 emit_insn (gen_prologue_movesi_from_cr (reg
));
26251 /* Return whether the split-stack arg pointer (r12) is used. */
26254 split_stack_arg_pointer_used_p (void)
26256 /* If the pseudo holding the arg pointer is no longer a pseudo,
26257 then the arg pointer is used. */
26258 if (cfun
->machine
->split_stack_arg_pointer
!= NULL_RTX
26259 && (!REG_P (cfun
->machine
->split_stack_arg_pointer
)
26260 || (REGNO (cfun
->machine
->split_stack_arg_pointer
)
26261 < FIRST_PSEUDO_REGISTER
)))
26264 /* Unfortunately we also need to do some code scanning, since
26265 r12 may have been substituted for the pseudo. */
26267 basic_block bb
= ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
;
26268 FOR_BB_INSNS (bb
, insn
)
26269 if (NONDEBUG_INSN_P (insn
))
26271 /* A call destroys r12. */
26276 FOR_EACH_INSN_USE (use
, insn
)
26278 rtx x
= DF_REF_REG (use
);
26279 if (REG_P (x
) && REGNO (x
) == 12)
26283 FOR_EACH_INSN_DEF (def
, insn
)
26285 rtx x
= DF_REF_REG (def
);
26286 if (REG_P (x
) && REGNO (x
) == 12)
26290 return bitmap_bit_p (DF_LR_OUT (bb
), 12);
26293 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26296 rs6000_global_entry_point_needed_p (void)
26298 /* Only needed for the ELFv2 ABI. */
26299 if (DEFAULT_ABI
!= ABI_ELFv2
)
26302 /* With -msingle-pic-base, we assume the whole program shares the same
26303 TOC, so no global entry point prologues are needed anywhere. */
26304 if (TARGET_SINGLE_PIC_BASE
)
26307 /* Ensure we have a global entry point for thunks. ??? We could
26308 avoid that if the target routine doesn't need a global entry point,
26309 but we do not know whether this is the case at this point. */
26310 if (cfun
->is_thunk
)
26313 /* For regular functions, rs6000_emit_prologue sets this flag if the
26314 routine ever uses the TOC pointer. */
26315 return cfun
->machine
->r2_setup_needed
;
26318 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26320 rs6000_get_separate_components (void)
26322 rs6000_stack_t
*info
= rs6000_stack_info ();
26324 if (WORLD_SAVE_P (info
))
26327 gcc_assert (!(info
->savres_strategy
& SAVE_MULTIPLE
)
26328 && !(info
->savres_strategy
& REST_MULTIPLE
));
26330 /* Component 0 is the save/restore of LR (done via GPR0).
26331 Component 2 is the save of the TOC (GPR2).
26332 Components 13..31 are the save/restore of GPR13..GPR31.
26333 Components 46..63 are the save/restore of FPR14..FPR31. */
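  /* In other words, a component number is simply the hard register number
     for GPRs and FPRs, with 0 and 2 reused for LR and the TOC save, e.g.
     (illustrative):

       bitmap_set_bit (components, 31);   => wrap the save of r31 separately
       bitmap_set_bit (components, 46);   => wrap the save of f14 separately
       bitmap_set_bit (components, 0);    => wrap the LR save separately  */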
26335 cfun
->machine
->n_components
= 64;
26337 sbitmap components
= sbitmap_alloc (cfun
->machine
->n_components
);
26338 bitmap_clear (components
);
26340 int reg_size
= TARGET_32BIT
? 4 : 8;
26341 int fp_reg_size
= 8;
26343 /* The GPRs we need saved to the frame. */
26344 if ((info
->savres_strategy
& SAVE_INLINE_GPRS
)
26345 && (info
->savres_strategy
& REST_INLINE_GPRS
))
26347 int offset
= info
->gp_save_offset
;
26349 offset
+= info
->total_size
;
26351 for (unsigned regno
= info
->first_gp_reg_save
; regno
< 32; regno
++)
26353 if (IN_RANGE (offset
, -0x8000, 0x7fff)
26354 && save_reg_p (regno
))
26355 bitmap_set_bit (components
, regno
);
26357 offset
+= reg_size
;
26361 /* Don't mess with the hard frame pointer. */
26362 if (frame_pointer_needed
)
26363 bitmap_clear_bit (components
, HARD_FRAME_POINTER_REGNUM
);
26365 /* Don't mess with the fixed TOC register. */
26366 if ((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
26367 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
26368 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
26369 bitmap_clear_bit (components
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
26371 /* The FPRs we need saved to the frame. */
26372 if ((info
->savres_strategy
& SAVE_INLINE_FPRS
)
26373 && (info
->savres_strategy
& REST_INLINE_FPRS
))
26375 int offset
= info
->fp_save_offset
;
26377 offset
+= info
->total_size
;
26379 for (unsigned regno
= info
->first_fp_reg_save
; regno
< 64; regno
++)
26381 if (IN_RANGE (offset
, -0x8000, 0x7fff) && save_reg_p (regno
))
26382 bitmap_set_bit (components
, regno
);
26384 offset
+= fp_reg_size
;
26388 /* Optimize LR save and restore if we can. This is component 0. Any
26389 out-of-line register save/restore routines need LR. */
26390 if (info
->lr_save_p
26391 && !(flag_pic
&& (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
26392 && (info
->savres_strategy
& SAVE_INLINE_GPRS
)
26393 && (info
->savres_strategy
& REST_INLINE_GPRS
)
26394 && (info
->savres_strategy
& SAVE_INLINE_FPRS
)
26395 && (info
->savres_strategy
& REST_INLINE_FPRS
)
26396 && (info
->savres_strategy
& SAVE_INLINE_VRS
)
26397 && (info
->savres_strategy
& REST_INLINE_VRS
))
26399 int offset
= info
->lr_save_offset
;
26401 offset
+= info
->total_size
;
26402 if (IN_RANGE (offset
, -0x8000, 0x7fff))
26403 bitmap_set_bit (components
, 0);
26406 /* Optimize saving the TOC. This is component 2. */
26407 if (cfun
->machine
->save_toc_in_prologue
)
26408 bitmap_set_bit (components
, 2);
26413 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26415 rs6000_components_for_bb (basic_block bb
)
26417 rs6000_stack_t
*info
= rs6000_stack_info ();
26419 bitmap in
= DF_LIVE_IN (bb
);
26420 bitmap gen
= &DF_LIVE_BB_INFO (bb
)->gen
;
26421 bitmap kill
= &DF_LIVE_BB_INFO (bb
)->kill
;
26423 sbitmap components
= sbitmap_alloc (cfun
->machine
->n_components
);
26424 bitmap_clear (components
);
26426 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26429 for (unsigned regno
= info
->first_gp_reg_save
; regno
< 32; regno
++)
26430 if (bitmap_bit_p (in
, regno
)
26431 || bitmap_bit_p (gen
, regno
)
26432 || bitmap_bit_p (kill
, regno
))
26433 bitmap_set_bit (components
, regno
);
26436 for (unsigned regno
= info
->first_fp_reg_save
; regno
< 64; regno
++)
26437 if (bitmap_bit_p (in
, regno
)
26438 || bitmap_bit_p (gen
, regno
)
26439 || bitmap_bit_p (kill
, regno
))
26440 bitmap_set_bit (components
, regno
);
26442 /* The link register. */
26443 if (bitmap_bit_p (in
, LR_REGNO
)
26444 || bitmap_bit_p (gen
, LR_REGNO
)
26445 || bitmap_bit_p (kill
, LR_REGNO
))
26446 bitmap_set_bit (components
, 0);
26448 /* The TOC save. */
26449 if (bitmap_bit_p (in
, TOC_REGNUM
)
26450 || bitmap_bit_p (gen
, TOC_REGNUM
)
26451 || bitmap_bit_p (kill
, TOC_REGNUM
))
26452 bitmap_set_bit (components
, 2);
26457 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26459 rs6000_disqualify_components (sbitmap components
, edge e
,
26460 sbitmap edge_components
, bool /*is_prologue*/)
26462 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26463 live where we want to place that code. */
26464 if (bitmap_bit_p (edge_components
, 0)
26465 && bitmap_bit_p (DF_LIVE_IN (e
->dest
), 0))
26468 fprintf (dump_file
, "Disqualifying LR because GPR0 is live "
26469 "on entry to bb %d\n", e
->dest
->index
);
26470 bitmap_clear_bit (components
, 0);
26474 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26476 rs6000_emit_prologue_components (sbitmap components
)
26478 rs6000_stack_t
*info
= rs6000_stack_info ();
26479 rtx ptr_reg
= gen_rtx_REG (Pmode
, frame_pointer_needed
26480 ? HARD_FRAME_POINTER_REGNUM
26481 : STACK_POINTER_REGNUM
);
26483 machine_mode reg_mode
= Pmode
;
26484 int reg_size
= TARGET_32BIT
? 4 : 8;
26485 machine_mode fp_reg_mode
= TARGET_HARD_FLOAT
? DFmode
: SFmode
;
26486 int fp_reg_size
= 8;
26488 /* Prologue for LR. */
26489 if (bitmap_bit_p (components
, 0))
26491 rtx lr
= gen_rtx_REG (reg_mode
, LR_REGNO
);
26492 rtx reg
= gen_rtx_REG (reg_mode
, 0);
26493 rtx_insn
*insn
= emit_move_insn (reg
, lr
);
26494 RTX_FRAME_RELATED_P (insn
) = 1;
26495 add_reg_note (insn
, REG_CFA_REGISTER
, gen_rtx_SET (reg
, lr
));
26497 int offset
= info
->lr_save_offset
;
26499 offset
+= info
->total_size
;
26501 insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26502 RTX_FRAME_RELATED_P (insn
) = 1;
26503 rtx mem
= copy_rtx (SET_DEST (single_set (insn
)));
26504 add_reg_note (insn
, REG_CFA_OFFSET
, gen_rtx_SET (mem
, lr
));
26507 /* Prologue for TOC. */
26508 if (bitmap_bit_p (components
, 2))
26510 rtx reg
= gen_rtx_REG (reg_mode
, TOC_REGNUM
);
26511 rtx sp_reg
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26512 emit_insn (gen_frame_store (reg
, sp_reg
, RS6000_TOC_SAVE_SLOT
));
26515 /* Prologue for the GPRs. */
26516 int offset
= info
->gp_save_offset
;
26518 offset
+= info
->total_size
;
26520 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26522 if (bitmap_bit_p (components
, i
))
26524 rtx reg
= gen_rtx_REG (reg_mode
, i
);
26525 rtx_insn
*insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26526 RTX_FRAME_RELATED_P (insn
) = 1;
26527 rtx set
= copy_rtx (single_set (insn
));
26528 add_reg_note (insn
, REG_CFA_OFFSET
, set
);
26531 offset
+= reg_size
;
26534 /* Prologue for the FPRs. */
26535 offset
= info
->fp_save_offset
;
26537 offset
+= info
->total_size
;
26539 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26541 if (bitmap_bit_p (components
, i
))
26543 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
26544 rtx_insn
*insn
= emit_insn (gen_frame_store (reg
, ptr_reg
, offset
));
26545 RTX_FRAME_RELATED_P (insn
) = 1;
26546 rtx set
= copy_rtx (single_set (insn
));
26547 add_reg_note (insn
, REG_CFA_OFFSET
, set
);
26550 offset
+= fp_reg_size
;
26554 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26556 rs6000_emit_epilogue_components (sbitmap components
)
26558 rs6000_stack_t
*info
= rs6000_stack_info ();
26559 rtx ptr_reg
= gen_rtx_REG (Pmode
, frame_pointer_needed
26560 ? HARD_FRAME_POINTER_REGNUM
26561 : STACK_POINTER_REGNUM
);
26563 machine_mode reg_mode
= Pmode
;
26564 int reg_size
= TARGET_32BIT
? 4 : 8;
26566 machine_mode fp_reg_mode
= TARGET_HARD_FLOAT
? DFmode
: SFmode
;
26567 int fp_reg_size
= 8;
26569 /* Epilogue for the FPRs. */
26570 int offset
= info
->fp_save_offset
;
26572 offset
+= info
->total_size
;
26574 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26576 if (bitmap_bit_p (components
, i
))
26578 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
26579 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26580 RTX_FRAME_RELATED_P (insn
) = 1;
26581 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
26584 offset
+= fp_reg_size
;
26587 /* Epilogue for the GPRs. */
26588 offset
= info
->gp_save_offset
;
26590 offset
+= info
->total_size
;
26592 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26594 if (bitmap_bit_p (components
, i
))
26596 rtx reg
= gen_rtx_REG (reg_mode
, i
);
26597 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26598 RTX_FRAME_RELATED_P (insn
) = 1;
26599 add_reg_note (insn
, REG_CFA_RESTORE
, reg
);
26602 offset
+= reg_size
;
26605 /* Epilogue for LR. */
26606 if (bitmap_bit_p (components
, 0))
26608 int offset
= info
->lr_save_offset
;
26610 offset
+= info
->total_size
;
26612 rtx reg
= gen_rtx_REG (reg_mode
, 0);
26613 rtx_insn
*insn
= emit_insn (gen_frame_load (reg
, ptr_reg
, offset
));
26615 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
26616 insn
= emit_move_insn (lr
, reg
);
26617 RTX_FRAME_RELATED_P (insn
) = 1;
26618 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
26622 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26624 rs6000_set_handled_components (sbitmap components
)
26626 rs6000_stack_t
*info
= rs6000_stack_info ();
26628 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26629 if (bitmap_bit_p (components
, i
))
26630 cfun
->machine
->gpr_is_wrapped_separately
[i
] = true;
26632 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26633 if (bitmap_bit_p (components
, i
))
26634 cfun
->machine
->fpr_is_wrapped_separately
[i
- 32] = true;
26636 if (bitmap_bit_p (components
, 0))
26637 cfun
->machine
->lr_is_wrapped_separately
= true;
26639 if (bitmap_bit_p (components
, 2))
26640 cfun
->machine
->toc_is_wrapped_separately
= true;
26643 /* VRSAVE is a bit vector representing which AltiVec registers
26644 are used. The OS uses this to determine which vector
26645 registers to save on a context switch. We need to save
26646 VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */
26650 emit_vrsave_prologue (rs6000_stack_t
*info
, int save_regno
,
26651 HOST_WIDE_INT frame_off
, rtx frame_reg_rtx
)
26653 /* Get VRSAVE into a GPR. */
26654 rtx reg
= gen_rtx_REG (SImode
, save_regno
);
26655 rtx vrsave
= gen_rtx_REG (SImode
, VRSAVE_REGNO
);
26657 emit_insn (gen_get_vrsave_internal (reg
));
26659 emit_insn (gen_rtx_SET (reg
, vrsave
));
26662 int offset
= info
->vrsave_save_offset
+ frame_off
;
26663 emit_insn (gen_frame_store (reg
, frame_reg_rtx
, offset
));
26665 /* Include the registers in the mask. */
26666 emit_insn (gen_iorsi3 (reg
, reg
, GEN_INT (info
->vrsave_mask
)));
26668 emit_insn (generate_set_vrsave (reg
, info
, 0));
26671 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26672 called, it left the arg pointer to the old stack in r29. Otherwise, the
26673 arg pointer is the top of the current frame. */
26675 emit_split_stack_prologue (rs6000_stack_t
*info
, rtx_insn
*sp_adjust
,
26676 HOST_WIDE_INT frame_off
, rtx frame_reg_rtx
)
26678 cfun
->machine
->split_stack_argp_used
= true;
26682 rtx r12
= gen_rtx_REG (Pmode
, 12);
26683 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26684 rtx set_r12
= gen_rtx_SET (r12
, sp_reg_rtx
);
26685 emit_insn_before (set_r12
, sp_adjust
);
26687 else if (frame_off
!= 0 || REGNO (frame_reg_rtx
) != 12)
26689 rtx r12
= gen_rtx_REG (Pmode
, 12);
26690 if (frame_off
== 0)
26691 emit_move_insn (r12
, frame_reg_rtx
);
26693 emit_insn (gen_add3_insn (r12
, frame_reg_rtx
, GEN_INT (frame_off
)));
26698 rtx r12
= gen_rtx_REG (Pmode
, 12);
26699 rtx r29
= gen_rtx_REG (Pmode
, 29);
26700 rtx cr7
= gen_rtx_REG (CCUNSmode
, CR7_REGNO
);
26701 rtx not_more
= gen_label_rtx ();
26704 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
26705 gen_rtx_GEU (VOIDmode
, cr7
, const0_rtx
),
26706 gen_rtx_LABEL_REF (VOIDmode
, not_more
),
26708 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
26709 JUMP_LABEL (jump
) = not_more
;
26710 LABEL_NUSES (not_more
) += 1;
26711 emit_move_insn (r12
, r29
);
26712 emit_label (not_more
);
26716 /* Emit function prologue as insns. */
26719 rs6000_emit_prologue (void)
26721 rs6000_stack_t
*info
= rs6000_stack_info ();
26722 machine_mode reg_mode
= Pmode
;
26723 int reg_size
= TARGET_32BIT
? 4 : 8;
26724 machine_mode fp_reg_mode
= TARGET_HARD_FLOAT
? DFmode
: SFmode
;
26725 int fp_reg_size
= 8;
26726 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26727 rtx frame_reg_rtx
= sp_reg_rtx
;
26728 unsigned int cr_save_regno
;
26729 rtx cr_save_rtx
= NULL_RTX
;
26732 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
26733 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
26734 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
26735 int using_split_stack
= (flag_split_stack
26736 && (lookup_attribute ("no_split_stack",
26737 DECL_ATTRIBUTES (cfun
->decl
))
26740 /* Offset to top of frame for frame_reg and sp respectively. */
26741 HOST_WIDE_INT frame_off
= 0;
26742 HOST_WIDE_INT sp_off
= 0;
26743 /* sp_adjust is the stack adjusting instruction, tracked so that the
26744 insn setting up the split-stack arg pointer can be emitted just
26745 prior to it, when r12 is not used here for other purposes. */
26746 rtx_insn
*sp_adjust
= 0;
26749 /* Track and check usage of r0, r11, r12. */
26750 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
26751 #define START_USE(R) do \
26753 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26754 reg_inuse |= 1 << (R); \
26756 #define END_USE(R) do \
26758 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26759 reg_inuse &= ~(1 << (R)); \
26761 #define NOT_INUSE(R) do \
26763 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26766 #define START_USE(R) do {} while (0)
26767 #define END_USE(R) do {} while (0)
26768 #define NOT_INUSE(R) do {} while (0)
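/* These macros track the prologue scratch registers in a simple bit mask.
   In a checking build, for example (values illustrative, starting from an
   empty mask):

     START_USE (0);      reg_inuse == 0x0001  (r0 now a live temporary)
     START_USE (12);     reg_inuse == 0x1001
     END_USE (0);        reg_inuse == 0x1000
     NOT_INUSE (11);     asserts r11 is free before it is clobbered

   so an accidental double use of r0, r11 or r12 trips a gcc_assert.  */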
26771 if (DEFAULT_ABI
== ABI_ELFv2
26772 && !TARGET_SINGLE_PIC_BASE
)
26774 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
26776 /* With -mminimal-toc we may generate an extra use of r2 below. */
26777 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
26778 && !constant_pool_empty_p ())
26779 cfun
->machine
->r2_setup_needed
= true;
26783 if (flag_stack_usage_info
)
26784 current_function_static_stack_size
= info
->total_size
;
26786 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
26788 HOST_WIDE_INT size
= info
->total_size
;
26790 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
26792 if (size
> PROBE_INTERVAL
&& size
> get_stack_check_protect ())
26793 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26794 size
- get_stack_check_protect ());
26797 rs6000_emit_probe_stack_range (get_stack_check_protect (), size
);
26800 if (TARGET_FIX_AND_CONTINUE
)
26802 /* gdb on darwin arranges to forward a function from the old
26803 address by modifying the first 5 instructions of the function
26804 to branch to the overriding function. This is necessary to
26805 permit function pointers that point to the old function to
26806 actually forward to the new function. */
26807 emit_insn (gen_nop ());
26808 emit_insn (gen_nop ());
26809 emit_insn (gen_nop ());
26810 emit_insn (gen_nop ());
26811 emit_insn (gen_nop ());
26814 /* Handle world saves specially here. */
26815 if (WORLD_SAVE_P (info
))
26822 /* save_world expects lr in r0. */
26823 reg0
= gen_rtx_REG (Pmode
, 0);
26824 if (info
->lr_save_p
)
26826 insn
= emit_move_insn (reg0
,
26827 gen_rtx_REG (Pmode
, LR_REGNO
));
26828 RTX_FRAME_RELATED_P (insn
) = 1;
26831 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26832 assumptions about the offsets of various bits of the stack
26834 gcc_assert (info
->gp_save_offset
== -220
26835 && info
->fp_save_offset
== -144
26836 && info
->lr_save_offset
== 8
26837 && info
->cr_save_offset
== 4
26840 && (!crtl
->calls_eh_return
26841 || info
->ehrd_offset
== -432)
26842 && info
->vrsave_save_offset
== -224
26843 && info
->altivec_save_offset
== -416);
26845 treg
= gen_rtx_REG (SImode
, 11);
26846 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
26848 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26849 in R11. It also clobbers R12, so beware! */
26851 /* Preserve CR2 for save_world prologues */
26853 sz
+= 32 - info
->first_gp_reg_save
;
26854 sz
+= 64 - info
->first_fp_reg_save
;
26855 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
26856 p
= rtvec_alloc (sz
);
26858 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
26859 gen_rtx_REG (SImode
,
26861 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
26862 gen_rtx_SYMBOL_REF (Pmode
,
26864 /* We do floats first so that the instruction pattern matches
26866 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
26868 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
? DFmode
: SFmode
,
26869 info
->first_fp_reg_save
+ i
),
26871 info
->fp_save_offset
+ frame_off
+ 8 * i
);
26872 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
26874 = gen_frame_store (gen_rtx_REG (V4SImode
,
26875 info
->first_altivec_reg_save
+ i
),
26877 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
26878 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
26880 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
26882 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
26884 /* CR register traditionally saved as CR2. */
26886 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
26887 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
26888 /* Explain about use of R0. */
26889 if (info
->lr_save_p
)
26891 = gen_frame_store (reg0
,
26892 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
26893 /* Explain what happens to the stack pointer. */
26895 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
26896 RTVEC_ELT (p
, j
++) = gen_rtx_SET (sp_reg_rtx
, newval
);
26899 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26900 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26901 treg
, GEN_INT (-info
->total_size
));
26902 sp_off
= frame_off
= info
->total_size
;
26905 strategy
= info
->savres_strategy
;
26907 /* For V.4, update stack before we do any saving and set back pointer. */
26908 if (! WORLD_SAVE_P (info
)
26910 && (DEFAULT_ABI
== ABI_V4
26911 || crtl
->calls_eh_return
))
26913 bool need_r11
= (!(strategy
& SAVE_INLINE_FPRS
)
26914 || !(strategy
& SAVE_INLINE_GPRS
)
26915 || !(strategy
& SAVE_INLINE_VRS
));
26916 int ptr_regno
= -1;
26917 rtx ptr_reg
= NULL_RTX
;
26920 if (info
->total_size
< 32767)
26921 frame_off
= info
->total_size
;
26924 else if (info
->cr_save_p
26926 || info
->first_fp_reg_save
< 64
26927 || info
->first_gp_reg_save
< 32
26928 || info
->altivec_size
!= 0
26929 || info
->vrsave_size
!= 0
26930 || crtl
->calls_eh_return
)
26934 /* The prologue won't be saving any regs so there is no need
26935 to set up a frame register to access any frame save area.
26936 We also won't be using frame_off anywhere below, but set
26937 the correct value anyway to protect against future
26938 changes to this function. */
26939 frame_off
= info
->total_size
;
26941 if (ptr_regno
!= -1)
26943 /* Set up the frame offset to that needed by the first
26944 out-of-line save function. */
26945 START_USE (ptr_regno
);
26946 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26947 frame_reg_rtx
= ptr_reg
;
26948 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
26949 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
26950 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
26951 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
26952 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
26953 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
26954 frame_off
= -ptr_off
;
26956 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
26958 if (REGNO (frame_reg_rtx
) == 12)
26960 sp_off
= info
->total_size
;
26961 if (frame_reg_rtx
!= sp_reg_rtx
)
26962 rs6000_emit_stack_tie (frame_reg_rtx
, false);
26965 /* If we use the link register, get it into r0. */
26966 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
26967 && !cfun
->machine
->lr_is_wrapped_separately
)
26969 rtx addr
, reg
, mem
;
26971 reg
= gen_rtx_REG (Pmode
, 0);
26973 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
26974 RTX_FRAME_RELATED_P (insn
) = 1;
26976 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
26977 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
26979 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
26980 GEN_INT (info
->lr_save_offset
+ frame_off
));
26981 mem
= gen_rtx_MEM (Pmode
, addr
);
26982 /* This should not be of rs6000_sr_alias_set, because of
26983 __builtin_return_address. */
26985 insn
= emit_move_insn (mem
, reg
);
26986 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26987 NULL_RTX
, NULL_RTX
);
26992 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26993 r12 will be needed by out-of-line gpr save. */
26994 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
26995 && !(strategy
& (SAVE_INLINE_GPRS
26996 | SAVE_NOINLINE_GPRS_SAVES_LR
))
26998 if (!WORLD_SAVE_P (info
)
27000 && REGNO (frame_reg_rtx
) != cr_save_regno
27001 && !(using_static_chain_p
&& cr_save_regno
== 11)
27002 && !(using_split_stack
&& cr_save_regno
== 12 && sp_adjust
))
27004 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
27005 START_USE (cr_save_regno
);
27006 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27009 /* Do any required saving of fpr's. If only one or two to save, do
27010 it ourselves. Otherwise, call function. */
27011 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
27013 int offset
= info
->fp_save_offset
+ frame_off
;
27014 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
27017 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
27018 emit_frame_save (frame_reg_rtx
, fp_reg_mode
, i
, offset
,
27019 sp_off
- frame_off
);
27021 offset
+= fp_reg_size
;
27024 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
27026 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27027 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27028 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27029 rtx ptr_reg
= frame_reg_rtx
;
27031 if (REGNO (frame_reg_rtx
) == ptr_regno
)
27032 gcc_checking_assert (frame_off
== 0);
27035 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27036 NOT_INUSE (ptr_regno
);
27037 emit_insn (gen_add3_insn (ptr_reg
,
27038 frame_reg_rtx
, GEN_INT (frame_off
)));
27040 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27041 info
->fp_save_offset
,
27042 info
->lr_save_offset
,
27044 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
27045 NULL_RTX
, NULL_RTX
);
27050 /* Save GPRs. This is done as a PARALLEL if we are using
27051 the store-multiple instructions. */
27052 if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
27054 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
27055 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
27056 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27057 rtx ptr_reg
= frame_reg_rtx
;
27058 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
27059 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
27062 if (ptr_regno
== 12)
27065 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27067 /* Need to adjust r11 (r12) if we saved any FPRs. */
27068 if (end_save
+ frame_off
!= 0)
27070 rtx offset
= GEN_INT (end_save
+ frame_off
);
27073 frame_off
= -end_save
;
27075 NOT_INUSE (ptr_regno
);
27076 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27078 else if (!ptr_set_up
)
27080 NOT_INUSE (ptr_regno
);
27081 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27083 ptr_off
= -end_save
;
27084 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
27085 info
->gp_save_offset
+ ptr_off
,
27086 info
->lr_save_offset
+ ptr_off
,
27088 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
27089 NULL_RTX
, NULL_RTX
);
27093 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_MULTIPLE
))
27097 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
27098 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27100 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
27102 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
27103 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27104 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27105 NULL_RTX
, NULL_RTX
);
27107 else if (!WORLD_SAVE_P (info
))
27109 int offset
= info
->gp_save_offset
+ frame_off
;
27110 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
27113 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
27114 emit_frame_save (frame_reg_rtx
, reg_mode
, i
, offset
,
27115 sp_off
- frame_off
);
27117 offset
+= reg_size
;
27121 if (crtl
->calls_eh_return
)
27128 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27129 if (regno
== INVALID_REGNUM
)
27133 p
= rtvec_alloc (i
);
27137 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27138 if (regno
== INVALID_REGNUM
)
27142 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
27144 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
27145 RTVEC_ELT (p
, i
) = set
;
27146 RTX_FRAME_RELATED_P (set
) = 1;
27149 insn
= emit_insn (gen_blockage ());
27150 RTX_FRAME_RELATED_P (insn
) = 1;
27151 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
27154 /* In AIX ABI we need to make sure r2 is really saved. */
27155 if (TARGET_AIX
&& crtl
->calls_eh_return
)
27157 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
27158 rtx join_insn
, note
;
27159 rtx_insn
*save_insn
;
27160 long toc_restore_insn
;
27162 tmp_reg
= gen_rtx_REG (Pmode
, 11);
27163 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
27164 if (using_static_chain_p
)
27167 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
27171 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27172 /* Peek at instruction to which this function returns. If it's
27173 restoring r2, then we know we've already saved r2. We can't
27174 unconditionally save r2 because the value we have will already
27175 be updated if we arrived at this function via a plt call or
27176 toc adjusting stub. */
27177 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
27178 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
27179 + RS6000_TOC_SAVE_SLOT
);
27180 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
27181 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
27182 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
27183 validate_condition_mode (EQ
, CCUNSmode
);
27184 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
27185 emit_insn (gen_rtx_SET (compare_result
,
27186 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
27187 toc_save_done
= gen_label_rtx ();
27188 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
27189 gen_rtx_EQ (VOIDmode
, compare_result
,
27191 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
27193 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
27194 JUMP_LABEL (jump
) = toc_save_done
;
27195 LABEL_NUSES (toc_save_done
) += 1;
27197 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
27198 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
27199 sp_off
- frame_off
);
27201 emit_label (toc_save_done
);
27203 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27204 have a CFG that has different saves along different paths.
27205 Move the note to a dummy blockage insn, which describes that
27206 R2 is unconditionally saved after the label. */
27207 /* ??? An alternate representation might be a special insn pattern
      /* ??? An alternate representation might be a special insn pattern
	 containing both the branch and the store.  That might give the
	 code that minimizes the number of DW_CFA_advance opcodes more
	 freedom in placing the annotations.  */
27211 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
27213 remove_note (save_insn
, note
);
27215 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
27216 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
27217 RTX_FRAME_RELATED_P (save_insn
) = 0;
27219 join_insn
= emit_insn (gen_blockage ());
27220 REG_NOTES (join_insn
) = note
;
27221 RTX_FRAME_RELATED_P (join_insn
) = 1;
27223 if (using_static_chain_p
)
27225 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
27232 /* Save CR if we use any that must be preserved. */
27233 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
27235 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27236 GEN_INT (info
->cr_save_offset
+ frame_off
));
27237 rtx mem
= gen_frame_mem (SImode
, addr
);
27239 /* If we didn't copy cr before, do so now using r0. */
27240 if (cr_save_rtx
== NULL_RTX
)
27243 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
27244 rs6000_emit_prologue_move_from_cr (cr_save_rtx
);
27247 /* Saving CR requires a two-instruction sequence: one instruction
27248 to move the CR to a general-purpose register, and a second
27249 instruction that stores the GPR to memory.
27251 We do not emit any DWARF CFI records for the first of these,
27252 because we cannot properly represent the fact that CR is saved in
27253 a register. One reason is that we cannot express that multiple
27254 CR fields are saved; another reason is that on 64-bit, the size
27255 of the CR register in DWARF (4 bytes) differs from the size of
27256 a general-purpose register.
27258 This means if any intervening instruction were to clobber one of
27259 the call-saved CR fields, we'd have incorrect CFI. To prevent
27260 this from happening, we mark the store to memory as a use of
27261 those CR fields, which prevents any such instruction from being
27262 scheduled in between the two instructions. */
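      /* Concretely, the sequence emitted here is roughly

	   mfcr  rN            (or a single mfocrf on ELFv2 with one field)
	   stw   rN,OFF(r1)

	 and the USEs attached to the store below are what keep any
	 CR-clobbering instruction from being scheduled between the two.  */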
27267 crsave_v
[n_crsave
++] = gen_rtx_SET (mem
, cr_save_rtx
);
27268 for (i
= 0; i
< 8; i
++)
27269 if (save_reg_p (CR0_REGNO
+ i
))
27270 crsave_v
[n_crsave
++]
27271 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27273 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
,
27274 gen_rtvec_v (n_crsave
, crsave_v
)));
27275 END_USE (REGNO (cr_save_rtx
));
/* Now, there's no way that dwarf2out_frame_debug_expr is going to
   understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
   so we need to construct a frame expression manually.  */
27280 RTX_FRAME_RELATED_P (insn
) = 1;
27282 /* Update address to be stack-pointer relative, like
27283 rs6000_frame_related would do. */
27284 addr
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
27285 GEN_INT (info
->cr_save_offset
+ sp_off
));
27286 mem
= gen_frame_mem (SImode
, addr
);
27288 if (DEFAULT_ABI
== ABI_ELFv2
)
/* In the ELFv2 ABI we generate separate CFI records for each
   CR field that was actually saved.  They all point to the
   same 32-bit stack slot.  */
27296 for (i
= 0; i
< 8; i
++)
27297 if (save_reg_p (CR0_REGNO
+ i
))
27300 = gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
27302 RTX_FRAME_RELATED_P (crframe
[n_crframe
]) = 1;
27306 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27307 gen_rtx_PARALLEL (VOIDmode
,
27308 gen_rtvec_v (n_crframe
, crframe
)));
/* In other ABIs, by convention, we use a single CR regnum to
   represent the fact that all call-saved CR fields are saved.
   We use CR2_REGNO to be compatible with gcc-2.95 on Linux.  */
27315 rtx set
= gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR2_REGNO
));
27316 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
/* In the ELFv2 ABI we need to save all call-saved CR fields into
   *separate* slots if the routine calls __builtin_eh_return, so
   that they can be independently restored by the unwinder.  */
27323 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
27325 int i
, cr_off
= info
->ehcr_offset
;
/* ??? We might get better performance by using multiple mfocrf
   instructions.  */
27330 crsave
= gen_rtx_REG (SImode
, 0);
27331 emit_insn (gen_prologue_movesi_from_cr (crsave
));
27333 for (i
= 0; i
< 8; i
++)
27334 if (!call_used_regs
[CR0_REGNO
+ i
])
27336 rtvec p
= rtvec_alloc (2);
27338 = gen_frame_store (crsave
, frame_reg_rtx
, cr_off
+ frame_off
);
27340 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27342 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27344 RTX_FRAME_RELATED_P (insn
) = 1;
27345 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27346 gen_frame_store (gen_rtx_REG (SImode
, CR0_REGNO
+ i
),
27347 sp_reg_rtx
, cr_off
+ sp_off
));
27349 cr_off
+= reg_size
;
/* If we are emitting stack probes, but allocate no stack, then
   just note that in the dump file.  */
27355 if (flag_stack_clash_protection
27358 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME
, false);
/* Update stack and set back pointer unless this is V.4,
   for which it was done previously.  */
27362 if (!WORLD_SAVE_P (info
) && info
->push_p
27363 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
27365 rtx ptr_reg
= NULL
;
/* If saving altivec regs we need to be able to address all save
   locations using a 16-bit offset.  */
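/* A signed 16-bit displacement reaches at most 32767 bytes; the checks
   below therefore compare the worst-case offset of each save location
   (its save offset plus the frame size) against 32767 and switch to a
   separate pointer register when that range is exceeded.  */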
27370 if ((strategy
& SAVE_INLINE_VRS
) == 0
27371 || (info
->altivec_size
!= 0
27372 && (info
->altivec_save_offset
+ info
->altivec_size
- 16
27373 + info
->total_size
- frame_off
) > 32767)
27374 || (info
->vrsave_size
!= 0
27375 && (info
->vrsave_save_offset
27376 + info
->total_size
- frame_off
) > 32767))
27378 int sel
= SAVRES_SAVE
| SAVRES_VR
;
27379 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27381 if (using_static_chain_p
27382 && ptr_regno
== STATIC_CHAIN_REGNUM
)
27384 if (REGNO (frame_reg_rtx
) != ptr_regno
)
27385 START_USE (ptr_regno
);
27386 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27387 frame_reg_rtx
= ptr_reg
;
27388 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27389 frame_off
= -ptr_off
;
27391 else if (REGNO (frame_reg_rtx
) == 1)
27392 frame_off
= info
->total_size
;
27393 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27395 if (REGNO (frame_reg_rtx
) == 12)
27397 sp_off
= info
->total_size
;
27398 if (frame_reg_rtx
!= sp_reg_rtx
)
27399 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27402 /* Set frame pointer, if needed. */
27403 if (frame_pointer_needed
)
27405 insn
= emit_move_insn (gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
),
27407 RTX_FRAME_RELATED_P (insn
) = 1;
/* Save AltiVec registers if needed.  Save here because the red zone does
   not always include AltiVec registers.  */
27412 if (!WORLD_SAVE_P (info
)
27413 && info
->altivec_size
!= 0 && (strategy
& SAVE_INLINE_VRS
) == 0)
27415 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
/* Oddly, the vector save/restore functions point r0 at the end
   of the save area, then use r11 or r12 to load offsets for
   [reg+reg] addressing.  */
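/* Roughly (a sketch, not the exact emitted sequence): r0 is set to
   frame_reg + altivec_save_offset + altivec_size, i.e. just past the
   save area, and the out-of-line vector save routine then forms each
   slot address as r0 plus a negative offset held in r11 or r12, using
   the [reg+reg] form that stvx requires.  */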
27420 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
27421 int scratch_regno
= ptr_regno_for_savres (SAVRES_SAVE
| SAVRES_VR
);
27422 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
27424 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
27426 if (scratch_regno
== 12)
27428 if (end_save
+ frame_off
!= 0)
27430 rtx offset
= GEN_INT (end_save
+ frame_off
);
27432 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27435 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27437 ptr_off
= -end_save
;
27438 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
27439 info
->altivec_save_offset
+ ptr_off
,
27440 0, V4SImode
, SAVRES_SAVE
| SAVRES_VR
);
27441 rs6000_frame_related (insn
, scratch_reg
, sp_off
- ptr_off
,
27442 NULL_RTX
, NULL_RTX
);
27443 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
27445 /* The oddity mentioned above clobbered our frame reg. */
27446 emit_move_insn (frame_reg_rtx
, ptr_reg
);
27447 frame_off
= ptr_off
;
27450 else if (!WORLD_SAVE_P (info
)
27451 && info
->altivec_size
!= 0)
27455 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
27456 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
27458 rtx areg
, savereg
, mem
;
27459 HOST_WIDE_INT offset
;
27461 offset
= (info
->altivec_save_offset
+ frame_off
27462 + 16 * (i
- info
->first_altivec_reg_save
));
27464 savereg
= gen_rtx_REG (V4SImode
, i
);
27466 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
27468 mem
= gen_frame_mem (V4SImode
,
27469 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27470 GEN_INT (offset
)));
27471 insn
= emit_insn (gen_rtx_SET (mem
, savereg
));
27477 areg
= gen_rtx_REG (Pmode
, 0);
27478 emit_move_insn (areg
, GEN_INT (offset
));
27480 /* AltiVec addressing mode is [reg+reg]. */
27481 mem
= gen_frame_mem (V4SImode
,
27482 gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
));
/* Rather than emitting a generic move, force use of the stvx
   instruction, which we always want on ISA 2.07 (power8) systems.
   In particular we don't want xxpermdi/stxvd2x for little endian.  */
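/* The store below therefore becomes, roughly (registers illustrative):
       li    rX,offset          # rX is r0 here
       stvx  vN,rBASE,rX        # address is rBASE + rX
   because stvx only accepts the [reg+reg] addressing form.  */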
27488 insn
= emit_insn (gen_altivec_stvx_v4si_internal (mem
, savereg
));
27491 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27492 areg
, GEN_INT (offset
));
/* VRSAVE is a bit vector representing which AltiVec registers
   are used.  The OS uses this to determine which vector
   registers to save on a context switch.  We need to save
   VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */
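/* Conceptually, the sequence emitted by emit_vrsave_prologue is
   something like (a sketch, register number illustrative):
       mfvrsave rS                 # read the current VRSAVE value
       stw      rS,vrsave_slot(frame_reg)
       oris/ori rS,rS,<mask of vector regs used here>
       mtvrsave rS                 # advertise the new live set
   The epilogue later reloads and restores the saved value.  */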
27503 if (!WORLD_SAVE_P (info
) && info
->vrsave_size
!= 0)
/* Get VRSAVE into a GPR.  Note that ABI_V4 and ABI_DARWIN might
   be using r12 as frame_reg_rtx and r11 as the static chain
   pointer for nested functions.  */
27508 int save_regno
= 12;
27509 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27510 && !using_static_chain_p
)
27512 else if (using_split_stack
|| REGNO (frame_reg_rtx
) == 12)
27515 if (using_static_chain_p
)
27518 NOT_INUSE (save_regno
);
27520 emit_vrsave_prologue (info
, save_regno
, frame_off
, frame_reg_rtx
);
27523 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27524 if (!TARGET_SINGLE_PIC_BASE
27525 && ((TARGET_TOC
&& TARGET_MINIMAL_TOC
27526 && !constant_pool_empty_p ())
27527 || (DEFAULT_ABI
== ABI_V4
27528 && (flag_pic
== 1 || (flag_pic
&& TARGET_SECURE_PLT
))
27529 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))))
/* If emit_load_toc_table will use the link register, we need to save
   it.  We use R12 for this purpose because emit_load_toc_table
   can use register 0.  This allows us to use a plain 'blr' to return
   from the procedure more often.  */
27535 int save_LR_around_toc_setup
= (TARGET_ELF
27536 && DEFAULT_ABI
== ABI_V4
27538 && ! info
->lr_save_p
27539 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
) > 0);
27540 if (save_LR_around_toc_setup
)
27542 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27543 rtx tmp
= gen_rtx_REG (Pmode
, 12);
27546 insn
= emit_move_insn (tmp
, lr
);
27547 RTX_FRAME_RELATED_P (insn
) = 1;
27549 rs6000_emit_load_toc_table (TRUE
);
27551 insn
= emit_move_insn (lr
, tmp
);
27552 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
27553 RTX_FRAME_RELATED_P (insn
) = 1;
27556 rs6000_emit_load_toc_table (TRUE
);
27560 if (!TARGET_SINGLE_PIC_BASE
27561 && DEFAULT_ABI
== ABI_DARWIN
27562 && flag_pic
&& crtl
->uses_pic_offset_table
)
27564 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27565 rtx src
= gen_rtx_SYMBOL_REF (Pmode
, MACHOPIC_FUNCTION_BASE_NAME
);
27567 /* Save and restore LR locally around this call (in R0). */
27568 if (!info
->lr_save_p
)
27569 emit_move_insn (gen_rtx_REG (Pmode
, 0), lr
);
27571 emit_insn (gen_load_macho_picbase (src
));
27573 emit_move_insn (gen_rtx_REG (Pmode
,
27574 RS6000_PIC_OFFSET_TABLE_REGNUM
),
27577 if (!info
->lr_save_p
)
27578 emit_move_insn (lr
, gen_rtx_REG (Pmode
, 0));
/* If we need to, save the TOC register after doing the stack setup.
   Do not emit eh frame info for this save.  The unwinder wants info,
   conceptually attached to instructions in this function, about
   register values in the caller of this function.  This R2 may have
   already been changed from the value in the caller.
   We don't attempt to write accurate DWARF EH frame info for R2
   because code emitted by gcc for a (non-pointer) function call
   doesn't save and restore R2.  Instead, R2 is managed out-of-line
   by a linker generated plt call stub when the function resides in
   a shared library.  This behavior is costly to describe in DWARF,
   both in terms of the size of DWARF info and the time taken in the
   unwinder to interpret it.  R2 changes, apart from the
   calls_eh_return case earlier in this function, are handled by
   linux-unwind.h frob_update_context.  */
27596 if (rs6000_save_toc_in_prologue_p ()
27597 && !cfun
->machine
->toc_is_wrapped_separately
)
27599 rtx reg
= gen_rtx_REG (reg_mode
, TOC_REGNUM
);
27600 emit_insn (gen_frame_store (reg
, sp_reg_rtx
, RS6000_TOC_SAVE_SLOT
));
27603 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27604 if (using_split_stack
&& split_stack_arg_pointer_used_p ())
27605 emit_split_stack_prologue (info
, sp_adjust
, frame_off
, frame_reg_rtx
);
27608 /* Output .extern statements for the save/restore routines we use. */
27611 rs6000_output_savres_externs (FILE *file
)
27613 rs6000_stack_t
*info
= rs6000_stack_info ();
27615 if (TARGET_DEBUG_STACK
)
27616 debug_stack_info (info
);
/* Write .extern for any function we will call to save and restore
   fp values.  */
27620 if (info
->first_fp_reg_save
< 64
27625 int regno
= info
->first_fp_reg_save
- 32;
27627 if ((info
->savres_strategy
& SAVE_INLINE_FPRS
) == 0)
27629 bool lr
= (info
->savres_strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
27630 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27631 name
= rs6000_savres_routine_name (regno
, sel
);
27632 fprintf (file
, "\t.extern %s\n", name
);
27634 if ((info
->savres_strategy
& REST_INLINE_FPRS
) == 0)
27636 bool lr
= (info
->savres_strategy
27637 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
27638 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
27639 name
= rs6000_savres_routine_name (regno
, sel
);
27640 fprintf (file
, "\t.extern %s\n", name
);
27645 /* Write function prologue. */
27648 rs6000_output_function_prologue (FILE *file
)
27650 if (!cfun
->is_thunk
)
27651 rs6000_output_savres_externs (file
);
/* ELFv2 ABI r2 setup code and local entry point.  This must follow
   immediately after the global entry point label.  */
27655 if (rs6000_global_entry_point_needed_p ())
27657 const char *name
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
27659 (*targetm
.asm_out
.internal_label
) (file
, "LCF", rs6000_pic_labelno
);
27661 if (TARGET_CMODEL
!= CMODEL_LARGE
)
/* In the small and medium code models, we assume the TOC is less
   than 2 GB away from the text section, so it can be computed via the
   following two-instruction sequence.  */
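/* The emitted sequence looks roughly like:
   .LCF<N>:
   0:	addis 2,12,.TOC.-.LCF<N>@ha
	addi  2,2,.TOC.-.LCF<N>@l
   where .LCF<N> labels the addis itself, so r2 ends up as r12 (the
   global entry point address) plus the offset to the TOC base.  */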
27668 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
27669 fprintf (file
, "0:\taddis 2,12,.TOC.-");
27670 assemble_name (file
, buf
);
27671 fprintf (file
, "@ha\n");
27672 fprintf (file
, "\taddi 2,2,.TOC.-");
27673 assemble_name (file
, buf
);
27674 fprintf (file
, "@l\n");
/* In the large code model, we allow arbitrary offsets between the
   TOC and the text section, so we have to load the offset from
   memory.  The data field is emitted directly before the global
   entry point in rs6000_elf_declare_function_name.  */
27684 #ifdef HAVE_AS_ENTRY_MARKERS
/* If supported by the linker, emit a marker relocation.  If the
   total code size of the final executable or shared library
   happens to fit into 2 GB after all, the linker will replace
   this code sequence with the sequence for the small or medium
   code model.  */
27690 fprintf (file
, "\t.reloc .,R_PPC64_ENTRY\n");
27692 fprintf (file
, "\tld 2,");
27693 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
27694 assemble_name (file
, buf
);
27695 fprintf (file
, "-");
27696 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
27697 assemble_name (file
, buf
);
27698 fprintf (file
, "(12)\n");
27699 fprintf (file
, "\tadd 2,2,12\n");
27702 fputs ("\t.localentry\t", file
);
27703 assemble_name (file
, name
);
27704 fputs (",.-", file
);
27705 assemble_name (file
, name
);
27706 fputs ("\n", file
);
/* Output -mprofile-kernel code.  This needs to be done here instead of
   in output_function_profile since it must go after the ELFv2 ABI
   local entry point.  */
27712 if (TARGET_PROFILE_KERNEL
&& crtl
->profile
)
27714 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
27715 gcc_assert (!TARGET_32BIT
);
27717 asm_fprintf (file
, "\tmflr %s\n", reg_names
[0]);
/* In the ELFv2 ABI we have no compiler stack word.  It must be
   the responsibility of _mcount to preserve the static chain
   register if required.  */
27722 if (DEFAULT_ABI
!= ABI_ELFv2
27723 && cfun
->static_chain_decl
!= NULL
)
27725 asm_fprintf (file
, "\tstd %s,24(%s)\n",
27726 reg_names
[STATIC_CHAIN_REGNUM
], reg_names
[1]);
27727 fprintf (file
, "\tbl %s\n", RS6000_MCOUNT
);
27728 asm_fprintf (file
, "\tld %s,24(%s)\n",
27729 reg_names
[STATIC_CHAIN_REGNUM
], reg_names
[1]);
27732 fprintf (file
, "\tbl %s\n", RS6000_MCOUNT
);
27735 rs6000_pic_labelno
++;
/* -mprofile-kernel code calls mcount before the function prolog,
   so a profiled leaf function should stay a leaf function.  */

static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
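/* As a sketch (register number illustrative), the restore pair is:
       lwz    12,cr_save_offset(1)   # load the saved CR image
       mtcrf  0xff,12                # or several mtocrf, one per field
   and each insn carries a note describing where cr lives at that
   point, so the unwinder stays correct if they are scheduled apart.  */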
27760 load_cr_save (int regno
, rtx frame_reg_rtx
, int offset
, bool exit_func
)
27762 rtx mem
= gen_frame_mem_offset (SImode
, frame_reg_rtx
, offset
);
27763 rtx reg
= gen_rtx_REG (SImode
, regno
);
27764 rtx_insn
*insn
= emit_move_insn (reg
, mem
);
27766 if (!exit_func
&& DEFAULT_ABI
== ABI_V4
)
27768 rtx cr
= gen_rtx_REG (SImode
, CR2_REGNO
);
27769 rtx set
= gen_rtx_SET (reg
, cr
);
27771 add_reg_note (insn
, REG_CFA_REGISTER
, set
);
27772 RTX_FRAME_RELATED_P (insn
) = 1;
27777 /* Reload CR from REG. */
27780 restore_saved_cr (rtx reg
, int using_mfcr_multiple
, bool exit_func
)
27785 if (using_mfcr_multiple
)
27787 for (i
= 0; i
< 8; i
++)
27788 if (save_reg_p (CR0_REGNO
+ i
))
27790 gcc_assert (count
);
27793 if (using_mfcr_multiple
&& count
> 1)
27799 p
= rtvec_alloc (count
);
27802 for (i
= 0; i
< 8; i
++)
27803 if (save_reg_p (CR0_REGNO
+ i
))
27805 rtvec r
= rtvec_alloc (2);
27806 RTVEC_ELT (r
, 0) = reg
;
27807 RTVEC_ELT (r
, 1) = GEN_INT (1 << (7-i
));
27808 RTVEC_ELT (p
, ndx
) =
27809 gen_rtx_SET (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
),
27810 gen_rtx_UNSPEC (CCmode
, r
, UNSPEC_MOVESI_TO_CR
));
27813 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27814 gcc_assert (ndx
== count
);
/* For the ELFv2 ABI we generate a CFA_RESTORE for each
   CR field separately.  */
27818 if (!exit_func
&& DEFAULT_ABI
== ABI_ELFv2
&& flag_shrink_wrap
)
27820 for (i
= 0; i
< 8; i
++)
27821 if (save_reg_p (CR0_REGNO
+ i
))
27822 add_reg_note (insn
, REG_CFA_RESTORE
,
27823 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
27825 RTX_FRAME_RELATED_P (insn
) = 1;
27829 for (i
= 0; i
< 8; i
++)
27830 if (save_reg_p (CR0_REGNO
+ i
))
27832 rtx insn
= emit_insn (gen_movsi_to_cr_one
27833 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
/* For the ELFv2 ABI we generate a CFA_RESTORE for each
   CR field separately, attached to the insn that in fact
   restores this particular CR field.  */
27838 if (!exit_func
&& DEFAULT_ABI
== ABI_ELFv2
&& flag_shrink_wrap
)
27840 add_reg_note (insn
, REG_CFA_RESTORE
,
27841 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
27843 RTX_FRAME_RELATED_P (insn
) = 1;
27847 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27848 if (!exit_func
&& DEFAULT_ABI
!= ABI_ELFv2
27849 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
))
27851 rtx_insn
*insn
= get_last_insn ();
27852 rtx cr
= gen_rtx_REG (SImode
, CR2_REGNO
);
27854 add_reg_note (insn
, REG_CFA_RESTORE
, cr
);
27855 RTX_FRAME_RELATED_P (insn
) = 1;
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
27888 add_crlr_cfa_restore (const rs6000_stack_t
*info
, rtx cfa_restores
)
27890 if (DEFAULT_ABI
== ABI_ELFv2
)
27893 for (i
= 0; i
< 8; i
++)
27894 if (save_reg_p (CR0_REGNO
+ i
))
27896 rtx cr
= gen_rtx_REG (SImode
, CR0_REGNO
+ i
);
27897 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, cr
,
27901 else if (info
->cr_save_p
)
27902 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
27903 gen_rtx_REG (SImode
, CR2_REGNO
),
27906 if (info
->lr_save_p
)
27907 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
27908 gen_rtx_REG (Pmode
, LR_REGNO
),
27910 return cfa_restores
;
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
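/* For example: with the 288-byte 64-bit AIX/ELFv2 red zone, an offset
   of -200 is not clobberable by a signal handler (the predicate returns
   false), while -300 lies below the red zone and must be reloaded before
   the stack pointer is restored.  On V.4 there is no red zone, so any
   negative offset counts as clobberable.  */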
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
27939 /* Emit function epilogue as insns. */
27942 rs6000_emit_epilogue (int sibcall
)
27944 rs6000_stack_t
*info
;
27945 int restoring_GPRs_inline
;
27946 int restoring_FPRs_inline
;
27947 int using_load_multiple
;
27948 int using_mtcr_multiple
;
27949 int use_backchain_to_restore_sp
;
27952 HOST_WIDE_INT frame_off
= 0;
27953 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, 1);
27954 rtx frame_reg_rtx
= sp_reg_rtx
;
27955 rtx cfa_restores
= NULL_RTX
;
27957 rtx cr_save_reg
= NULL_RTX
;
27958 machine_mode reg_mode
= Pmode
;
27959 int reg_size
= TARGET_32BIT
? 4 : 8;
27960 machine_mode fp_reg_mode
= TARGET_HARD_FLOAT
? DFmode
: SFmode
;
27961 int fp_reg_size
= 8;
27964 unsigned ptr_regno
;
27966 info
= rs6000_stack_info ();
27968 strategy
= info
->savres_strategy
;
27969 using_load_multiple
= strategy
& REST_MULTIPLE
;
27970 restoring_FPRs_inline
= sibcall
|| (strategy
& REST_INLINE_FPRS
);
27971 restoring_GPRs_inline
= sibcall
|| (strategy
& REST_INLINE_GPRS
);
27972 using_mtcr_multiple
= (rs6000_tune
== PROCESSOR_PPC601
27973 || rs6000_tune
== PROCESSOR_PPC603
27974 || rs6000_tune
== PROCESSOR_PPC750
/* Restore via the backchain when we have a large frame, since this
   is more efficient than an addis, addi pair.  The second condition
   here will not trigger at the moment; We don't actually need a
   frame pointer for alloca, but the generic parts of the compiler
   give us one anyway.  */
27981 use_backchain_to_restore_sp
= (info
->total_size
+ (info
->lr_save_p
27982 ? info
->lr_save_offset
27984 || (cfun
->calls_alloca
27985 && !frame_pointer_needed
));
27986 restore_lr
= (info
->lr_save_p
27987 && (restoring_FPRs_inline
27988 || (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
))
27989 && (restoring_GPRs_inline
27990 || info
->first_fp_reg_save
< 64)
27991 && !cfun
->machine
->lr_is_wrapped_separately
);
27994 if (WORLD_SAVE_P (info
))
27998 const char *alloc_rname
;
/* eh_rest_world_r10 will return to the location saved in the LR
   stack slot (which is not likely to be our caller.)
   Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
   rest_world is similar, except any R10 parameter is ignored.
   The exception-handling stuff that was here in 2.95 is no
   longer necessary.  */
28009 + 32 - info
->first_gp_reg_save
28010 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
28011 + 63 + 1 - info
->first_fp_reg_save
);
28013 strcpy (rname
, ((crtl
->calls_eh_return
) ?
28014 "*eh_rest_world_r10" : "*rest_world"));
28015 alloc_rname
= ggc_strdup (rname
);
28018 RTVEC_ELT (p
, j
++) = ret_rtx
;
28020 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
/* The instruction pattern requires a clobber here;
   it is shared with the restVEC helper.  */
28024 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
28027 /* CR register traditionally saved as CR2. */
28028 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
28030 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
28031 if (flag_shrink_wrap
)
28033 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
28034 gen_rtx_REG (Pmode
, LR_REGNO
),
28036 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28040 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
28042 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
28044 = gen_frame_load (reg
,
28045 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
28046 if (flag_shrink_wrap
28047 && save_reg_p (info
->first_gp_reg_save
+ i
))
28048 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28050 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
28052 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
28054 = gen_frame_load (reg
,
28055 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
28056 if (flag_shrink_wrap
28057 && save_reg_p (info
->first_altivec_reg_save
+ i
))
28058 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28060 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
28062 rtx reg
= gen_rtx_REG (TARGET_HARD_FLOAT
? DFmode
: SFmode
,
28063 info
->first_fp_reg_save
+ i
);
28065 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
28066 if (flag_shrink_wrap
28067 && save_reg_p (info
->first_fp_reg_save
+ i
))
28068 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28071 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
28073 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
28075 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
28077 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
28079 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
28080 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28082 if (flag_shrink_wrap
)
28084 REG_NOTES (insn
) = cfa_restores
;
28085 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28086 RTX_FRAME_RELATED_P (insn
) = 1;
28091 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28093 frame_off
= info
->total_size
;
/* Restore AltiVec registers if we must do so before adjusting the
   stack.  */
28097 if (info
->altivec_size
!= 0
28098 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28099 || (DEFAULT_ABI
!= ABI_V4
28100 && offset_below_red_zone_p (info
->altivec_save_offset
))))
28103 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28105 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
28106 if (use_backchain_to_restore_sp
)
28108 int frame_regno
= 11;
28110 if ((strategy
& REST_INLINE_VRS
) == 0)
/* Of r11 and r12, select the one not clobbered by an
   out-of-line restore function for the frame register.  */
28114 frame_regno
= 11 + 12 - scratch_regno
;
28116 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
28117 emit_move_insn (frame_reg_rtx
,
28118 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28121 else if (frame_pointer_needed
)
28122 frame_reg_rtx
= hard_frame_pointer_rtx
;
28124 if ((strategy
& REST_INLINE_VRS
) == 0)
28126 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28128 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28129 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28131 if (end_save
+ frame_off
!= 0)
28133 rtx offset
= GEN_INT (end_save
+ frame_off
);
28135 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28138 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28140 ptr_off
= -end_save
;
28141 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28142 info
->altivec_save_offset
+ ptr_off
,
28143 0, V4SImode
, SAVRES_VR
);
28147 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28148 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28150 rtx addr
, areg
, mem
, insn
;
28151 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28152 HOST_WIDE_INT offset
28153 = (info
->altivec_save_offset
+ frame_off
28154 + 16 * (i
- info
->first_altivec_reg_save
));
28156 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28158 mem
= gen_frame_mem (V4SImode
,
28159 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28160 GEN_INT (offset
)));
28161 insn
= gen_rtx_SET (reg
, mem
);
28165 areg
= gen_rtx_REG (Pmode
, 0);
28166 emit_move_insn (areg
, GEN_INT (offset
));
28168 /* AltiVec addressing mode is [reg+reg]. */
28169 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28170 mem
= gen_frame_mem (V4SImode
, addr
);
/* Rather than emitting a generic move, force use of the
   lvx instruction, which we always want.  In particular we
   don't want lxvd2x/xxpermdi for little endian.  */
28175 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28178 (void) emit_insn (insn
);
28182 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28183 if (((strategy
& REST_INLINE_VRS
) == 0
28184 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28185 && (flag_shrink_wrap
28186 || (offset_below_red_zone_p
28187 (info
->altivec_save_offset
28188 + 16 * (i
- info
->first_altivec_reg_save
))))
28191 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28192 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28196 /* Restore VRSAVE if we must do so before adjusting the stack. */
28197 if (info
->vrsave_size
!= 0
28198 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28199 || (DEFAULT_ABI
!= ABI_V4
28200 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
28204 if (frame_reg_rtx
== sp_reg_rtx
)
28206 if (use_backchain_to_restore_sp
)
28208 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28209 emit_move_insn (frame_reg_rtx
,
28210 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28213 else if (frame_pointer_needed
)
28214 frame_reg_rtx
= hard_frame_pointer_rtx
;
28217 reg
= gen_rtx_REG (SImode
, 12);
28218 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28219 info
->vrsave_save_offset
+ frame_off
));
28221 emit_insn (generate_set_vrsave (reg
, info
, 1));
/* If we have a large stack frame, restore the old stack pointer
   using the backchain.  */
28227 if (use_backchain_to_restore_sp
)
28229 if (frame_reg_rtx
== sp_reg_rtx
)
/* Under V.4, don't reset the stack pointer until after we're done
   loading the saved registers.  */
28233 if (DEFAULT_ABI
== ABI_V4
)
28234 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28236 insn
= emit_move_insn (frame_reg_rtx
,
28237 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28240 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28241 && DEFAULT_ABI
== ABI_V4
)
28242 /* frame_reg_rtx has been set up by the altivec restore. */
28246 insn
= emit_move_insn (sp_reg_rtx
, frame_reg_rtx
);
28247 frame_reg_rtx
= sp_reg_rtx
;
/* If we have a frame pointer, we can restore the old stack pointer
   from it.  */
28252 else if (frame_pointer_needed
)
28254 frame_reg_rtx
= sp_reg_rtx
;
28255 if (DEFAULT_ABI
== ABI_V4
)
28256 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28257 /* Prevent reordering memory accesses against stack pointer restore. */
28258 else if (cfun
->calls_alloca
28259 || offset_below_red_zone_p (-info
->total_size
))
28260 rs6000_emit_stack_tie (frame_reg_rtx
, true);
28262 insn
= emit_insn (gen_add3_insn (frame_reg_rtx
, hard_frame_pointer_rtx
,
28263 GEN_INT (info
->total_size
)));
28266 else if (info
->push_p
28267 && DEFAULT_ABI
!= ABI_V4
28268 && !crtl
->calls_eh_return
)
28270 /* Prevent reordering memory accesses against stack pointer restore. */
28271 if (cfun
->calls_alloca
28272 || offset_below_red_zone_p (-info
->total_size
))
28273 rs6000_emit_stack_tie (frame_reg_rtx
, false);
28274 insn
= emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
,
28275 GEN_INT (info
->total_size
)));
28278 if (insn
&& frame_reg_rtx
== sp_reg_rtx
)
28282 REG_NOTES (insn
) = cfa_restores
;
28283 cfa_restores
= NULL_RTX
;
28285 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28286 RTX_FRAME_RELATED_P (insn
) = 1;
28289 /* Restore AltiVec registers if we have not done so already. */
28290 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28291 && info
->altivec_size
!= 0
28292 && (DEFAULT_ABI
== ABI_V4
28293 || !offset_below_red_zone_p (info
->altivec_save_offset
)))
28297 if ((strategy
& REST_INLINE_VRS
) == 0)
28299 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28301 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28302 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28303 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28305 if (end_save
+ frame_off
!= 0)
28307 rtx offset
= GEN_INT (end_save
+ frame_off
);
28309 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28312 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28314 ptr_off
= -end_save
;
28315 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28316 info
->altivec_save_offset
+ ptr_off
,
28317 0, V4SImode
, SAVRES_VR
);
28318 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
/* Frame reg was clobbered by out-of-line save.  Restore it
   from ptr_reg, and if we are calling out-of-line gpr or
   fpr restore set up the correct pointer and offset.  */
28323 unsigned newptr_regno
= 1;
28324 if (!restoring_GPRs_inline
)
28326 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28327 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28328 newptr_regno
= ptr_regno_for_savres (sel
);
28329 end_save
= info
->gp_save_offset
+ info
->gp_size
;
28331 else if (!restoring_FPRs_inline
)
28333 bool lr
= !(strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
);
28334 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28335 newptr_regno
= ptr_regno_for_savres (sel
);
28336 end_save
= info
->fp_save_offset
+ info
->fp_size
;
28339 if (newptr_regno
!= 1 && REGNO (frame_reg_rtx
) != newptr_regno
)
28340 frame_reg_rtx
= gen_rtx_REG (Pmode
, newptr_regno
);
28342 if (end_save
+ ptr_off
!= 0)
28344 rtx offset
= GEN_INT (end_save
+ ptr_off
);
28346 frame_off
= -end_save
;
28348 emit_insn (gen_addsi3_carry (frame_reg_rtx
,
28351 emit_insn (gen_adddi3_carry (frame_reg_rtx
,
28356 frame_off
= ptr_off
;
28357 emit_move_insn (frame_reg_rtx
, ptr_reg
);
28363 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28364 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28366 rtx addr
, areg
, mem
, insn
;
28367 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28368 HOST_WIDE_INT offset
28369 = (info
->altivec_save_offset
+ frame_off
28370 + 16 * (i
- info
->first_altivec_reg_save
));
28372 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28374 mem
= gen_frame_mem (V4SImode
,
28375 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28376 GEN_INT (offset
)));
28377 insn
= gen_rtx_SET (reg
, mem
);
28381 areg
= gen_rtx_REG (Pmode
, 0);
28382 emit_move_insn (areg
, GEN_INT (offset
));
28384 /* AltiVec addressing mode is [reg+reg]. */
28385 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28386 mem
= gen_frame_mem (V4SImode
, addr
);
/* Rather than emitting a generic move, force use of the
   lvx instruction, which we always want.  In particular we
   don't want lxvd2x/xxpermdi for little endian.  */
28391 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28394 (void) emit_insn (insn
);
28398 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28399 if (((strategy
& REST_INLINE_VRS
) == 0
28400 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28401 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28404 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28405 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28409 /* Restore VRSAVE if we have not done so already. */
28410 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28411 && info
->vrsave_size
!= 0
28412 && (DEFAULT_ABI
== ABI_V4
28413 || !offset_below_red_zone_p (info
->vrsave_save_offset
)))
28417 reg
= gen_rtx_REG (SImode
, 12);
28418 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28419 info
->vrsave_save_offset
+ frame_off
));
28421 emit_insn (generate_set_vrsave (reg
, info
, 1));
/* If we exit by an out-of-line restore function on ABI_V4 then that
   function will deallocate the stack, so we don't need to worry
   about the unwinder restoring cr from an invalid stack frame
   location.  */
28428 exit_func
= (!restoring_FPRs_inline
28429 || (!restoring_GPRs_inline
28430 && info
->first_fp_reg_save
== 64));
/* In the ELFv2 ABI we need to restore all call-saved CR fields from
   *separate* slots if the routine calls __builtin_eh_return, so
   that they can be independently restored by the unwinder.  */
28435 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
28437 int i
, cr_off
= info
->ehcr_offset
;
28439 for (i
= 0; i
< 8; i
++)
28440 if (!call_used_regs
[CR0_REGNO
+ i
])
28442 rtx reg
= gen_rtx_REG (SImode
, 0);
28443 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28444 cr_off
+ frame_off
));
28446 insn
= emit_insn (gen_movsi_to_cr_one
28447 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
28449 if (!exit_func
&& flag_shrink_wrap
)
28451 add_reg_note (insn
, REG_CFA_RESTORE
,
28452 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
28454 RTX_FRAME_RELATED_P (insn
) = 1;
28457 cr_off
+= reg_size
;
/* Get the old lr if we saved it.  If we are restoring registers
   out-of-line, then the out-of-line routines can do this for us.  */
28463 if (restore_lr
&& restoring_GPRs_inline
)
28464 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28466 /* Get the old cr if we saved it. */
28467 if (info
->cr_save_p
)
28469 unsigned cr_save_regno
= 12;
28471 if (!restoring_GPRs_inline
)
/* Ensure we don't use the register used by the out-of-line
   gpr register restore below.  */
28475 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28476 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28477 int gpr_ptr_regno
= ptr_regno_for_savres (sel
);
28479 if (gpr_ptr_regno
== 12)
28480 cr_save_regno
= 11;
28481 gcc_checking_assert (REGNO (frame_reg_rtx
) != cr_save_regno
);
28483 else if (REGNO (frame_reg_rtx
) == 12)
28484 cr_save_regno
= 11;
28486 cr_save_reg
= load_cr_save (cr_save_regno
, frame_reg_rtx
,
28487 info
->cr_save_offset
+ frame_off
,
28491 /* Set LR here to try to overlap restores below. */
28492 if (restore_lr
&& restoring_GPRs_inline
)
28493 restore_saved_lr (0, exit_func
);
28495 /* Load exception handler data registers, if needed. */
28496 if (crtl
->calls_eh_return
)
28498 unsigned int i
, regno
;
28502 rtx reg
= gen_rtx_REG (reg_mode
, 2);
28503 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28504 frame_off
+ RS6000_TOC_SAVE_SLOT
));
28511 regno
= EH_RETURN_DATA_REGNO (i
);
28512 if (regno
== INVALID_REGNUM
)
28515 mem
= gen_frame_mem_offset (reg_mode
, frame_reg_rtx
,
28516 info
->ehrd_offset
+ frame_off
28517 + reg_size
* (int) i
);
28519 emit_move_insn (gen_rtx_REG (reg_mode
, regno
), mem
);
/* Restore GPRs.  This is done as a PARALLEL if we are using
   the load-multiple instructions.  */
28525 if (!restoring_GPRs_inline
)
28527 /* We are jumping to an out-of-line function. */
28529 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
28530 bool can_use_exit
= end_save
== 0;
28531 int sel
= SAVRES_GPR
| (can_use_exit
? SAVRES_LR
: 0);
28534 /* Emit stack reset code if we need it. */
28535 ptr_regno
= ptr_regno_for_savres (sel
);
28536 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
28538 rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28539 else if (end_save
+ frame_off
!= 0)
28540 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
,
28541 GEN_INT (end_save
+ frame_off
)));
28542 else if (REGNO (frame_reg_rtx
) != ptr_regno
)
28543 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28544 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28545 frame_off
= -end_save
;
28547 if (can_use_exit
&& info
->cr_save_p
)
28548 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, true);
28550 ptr_off
= -end_save
;
28551 rs6000_emit_savres_rtx (info
, ptr_reg
,
28552 info
->gp_save_offset
+ ptr_off
,
28553 info
->lr_save_offset
+ ptr_off
,
28556 else if (using_load_multiple
)
28559 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
28560 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
28562 = gen_frame_load (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
28564 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
28565 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28569 int offset
= info
->gp_save_offset
+ frame_off
;
28570 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28573 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28575 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28576 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28579 offset
+= reg_size
;
28583 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
/* If the frame pointer was used then we can't delay emitting
   a REG_CFA_DEF_CFA note.  This must happen on the insn that
   restores the frame pointer, r31.  We may have already emitted
   a REG_CFA_DEF_CFA note, but that's OK; A duplicate is
   discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
   be harmless if emitted.  */
28591 if (frame_pointer_needed
)
28593 insn
= get_last_insn ();
28594 add_reg_note (insn
, REG_CFA_DEF_CFA
,
28595 plus_constant (Pmode
, frame_reg_rtx
, frame_off
));
28596 RTX_FRAME_RELATED_P (insn
) = 1;
/* Set up cfa_restores.  We always need these when
   shrink-wrapping.  If not shrink-wrapping then we only need
   the cfa_restore when the stack location is no longer valid.
   The cfa_restores must be emitted on or before the insn that
   invalidates the stack, and of course must not be emitted
   before the insn that actually does the restore.  The latter
   is why it is a bad idea to emit the cfa_restores as a group
   on the last instruction here that actually does a restore:
   That insn may be reordered with respect to others doing
   restores.  */
28609 if (flag_shrink_wrap
28610 && !restoring_GPRs_inline
28611 && info
->first_fp_reg_save
== 64)
28612 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28614 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28616 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28618 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28619 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28623 if (!restoring_GPRs_inline
28624 && info
->first_fp_reg_save
== 64)
28626 /* We are jumping to an out-of-line function. */
28628 emit_cfa_restores (cfa_restores
);
28632 if (restore_lr
&& !restoring_GPRs_inline
)
28634 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28635 restore_saved_lr (0, exit_func
);
28638 /* Restore fpr's if we need to do it without calling a function. */
28639 if (restoring_FPRs_inline
)
28641 int offset
= info
->fp_save_offset
+ frame_off
;
28642 for (i
= info
->first_fp_reg_save
; i
< 64; i
++)
28645 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
28647 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
28648 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28649 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28650 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
28654 offset
+= fp_reg_size
;
28658 /* If we saved cr, restore it here. Just those that were used. */
28659 if (info
->cr_save_p
)
28660 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, exit_func
);
/* If this is V.4, unwind the stack pointer after all of the loads
   have been done, or set up r11 if we are restoring fp out of line.  */
28665 if (!restoring_FPRs_inline
)
28667 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28668 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28669 ptr_regno
= ptr_regno_for_savres (sel
);
28672 insn
= rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28673 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28676 if (insn
&& restoring_FPRs_inline
)
28680 REG_NOTES (insn
) = cfa_restores
;
28681 cfa_restores
= NULL_RTX
;
28683 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28684 RTX_FRAME_RELATED_P (insn
) = 1;
28687 if (crtl
->calls_eh_return
)
28689 rtx sa
= EH_RETURN_STACKADJ_RTX
;
28690 emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
, sa
));
28693 if (!sibcall
&& restoring_FPRs_inline
)
/* We can't hang the cfa_restores off a simple return,
   since the shrink-wrap code sometimes uses an existing
   return.  This means there might be a path from
   pre-prologue code to this return, and dwarf2cfi code
   wants the eh_frame unwinder state to be the same on
   all paths to any point.  So we need to emit the
   cfa_restores before the return.  For -m64 we really
   don't need epilogue cfa_restores at all, except for
   this irritating dwarf2cfi with shrink-wrap
   requirement; The stack red-zone means eh_frame info
   from the prologue telling the unwinder to restore
   from the stack is perfectly good right to the end of
   the function.  */
28710 emit_insn (gen_blockage ());
28711 emit_cfa_restores (cfa_restores
);
28712 cfa_restores
= NULL_RTX
;
28715 emit_jump_insn (targetm
.gen_simple_return ());
28718 if (!sibcall
&& !restoring_FPRs_inline
)
28720 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28721 rtvec p
= rtvec_alloc (3 + !!lr
+ 64 - info
->first_fp_reg_save
);
28723 RTVEC_ELT (p
, elt
++) = ret_rtx
;
28725 RTVEC_ELT (p
, elt
++)
28726 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
/* We have to restore more than two FP registers, so branch to the
   restore function.  It will return to our caller.  */
28734 if (flag_shrink_wrap
)
28735 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28737 sym
= rs6000_savres_routine_sym (info
, SAVRES_FPR
| (lr
? SAVRES_LR
: 0));
28738 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, sym
);
28739 reg
= (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)? 1 : 11;
28740 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, reg
));
28742 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
28744 rtx reg
= gen_rtx_REG (DFmode
, info
->first_fp_reg_save
+ i
);
28746 RTVEC_ELT (p
, elt
++)
28747 = gen_frame_load (reg
, sp_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
28748 if (flag_shrink_wrap
28749 && save_reg_p (info
->first_fp_reg_save
+ i
))
28750 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28753 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
/* Ensure the cfa_restores are hung off an insn that won't
   be reordered above other restores.  */
28761 emit_insn (gen_blockage ());
28763 emit_cfa_restores (cfa_restores
);
28767 /* Write function epilogue. */
28770 rs6000_output_function_epilogue (FILE *file
)
28773 macho_branch_islands ();
28776 rtx_insn
*insn
= get_last_insn ();
28777 rtx_insn
*deleted_debug_label
= NULL
;
/* Mach-O doesn't support labels at the end of objects, so if
   it looks like we might want one, take special action.

   First, collect any sequence of deleted debug labels.  */
28785 && NOTE_KIND (insn
) != NOTE_INSN_DELETED_LABEL
)
/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
   notes only, instead set their CODE_LABEL_NUMBER to -1,
   otherwise there would be code generation differences
   in between -g and -g0.  */
28791 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
28792 deleted_debug_label
= insn
;
28793 insn
= PREV_INSN (insn
);
/* Second, if we have:
     label:
       barrier
   then this needs to be detected, so skip past the barrier.  */
28801 if (insn
&& BARRIER_P (insn
))
28802 insn
= PREV_INSN (insn
);
28804 /* Up to now we've only seen notes or barriers. */
28809 && NOTE_KIND (insn
) == NOTE_INSN_DELETED_LABEL
))
28810 /* Trailing label: <barrier>. */
28811 fputs ("\tnop\n", file
);
28814 /* Lastly, see if we have a completely empty function body. */
28815 while (insn
&& ! INSN_P (insn
))
28816 insn
= PREV_INSN (insn
);
/* If we don't find any insns, we've got an empty function body;
   I.e. completely empty - without a return or branch.  This is
   taken as the case where a function body has been removed
   because it contains an inline __builtin_unreachable().  GCC
   states that reaching __builtin_unreachable() means UB so we're
   not obliged to do anything special; however, we want
   non-zero-sized function bodies.  To meet this, and help the
   user out, let's trap the case.  */
28826 fputs ("\ttrap\n", file
);
28829 else if (deleted_debug_label
)
28830 for (insn
= deleted_debug_label
; insn
; insn
= NEXT_INSN (insn
))
28831 if (NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
28832 CODE_LABEL_NUMBER (insn
) = -1;
/* Output a traceback table here.  See /usr/include/sys/debug.h for info
   on its format.

   We don't output a traceback table if -finhibit-size-directive was
   used.  The documentation for -finhibit-size-directive reads
   ``don't output a @code{.size} assembler directive, or anything
   else that would cause trouble if the function is split in the
   middle, and the two halves are placed at locations far apart in
   memory.''  The traceback table has this property, since it
   includes the offset from the start of the function to the
   traceback table itself.

   System V.4 Powerpc's (and the embedded ABI derived from it) use a
   different traceback table.  */
28850 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
28851 && ! flag_inhibit_size_directive
28852 && rs6000_traceback
!= traceback_none
&& !cfun
->is_thunk
)
28854 const char *fname
= NULL
;
28855 const char *language_string
= lang_hooks
.name
;
28856 int fixed_parms
= 0, float_parms
= 0, parm_info
= 0;
28858 int optional_tbtab
;
28859 rs6000_stack_t
*info
= rs6000_stack_info ();
28861 if (rs6000_traceback
== traceback_full
)
28862 optional_tbtab
= 1;
28863 else if (rs6000_traceback
== traceback_part
)
28864 optional_tbtab
= 0;
28866 optional_tbtab
= !optimize_size
&& !TARGET_ELF
;
28868 if (optional_tbtab
)
28870 fname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
28871 while (*fname
== '.') /* V.4 encodes . in the name */
/* Need label immediately before tbtab, so we can compute
   its offset from the function start.  */
28876 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
28877 ASM_OUTPUT_LABEL (file
, fname
);
/* The .tbtab pseudo-op can only be used for the first eight
   expressions, since it can't handle the possibly variable
   length fields that follow.  However, if you omit the optional
   fields, the assembler outputs zeros for all optional fields
   anyway, giving each variable length field its minimum length
   (as defined in sys/debug.h).  Thus we can not use the .tbtab
   pseudo-op at all.  */
/* An all-zero word flags the start of the tbtab, for debuggers
   that have to find it by searching forward from the entry
   point or from the current pc.  */
28891 fputs ("\t.long 0\n", file
);
28893 /* Tbtab format type. Use format type 0. */
28894 fputs ("\t.byte 0,", file
);
/* Language type.  Unfortunately, there does not seem to be any
   official way to discover the language being compiled, so we
   use language_string.
   C is 0.  Fortran is 1.  Ada is 3.  C++ is 9.
   Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
   a number, so for now use 9.  LTO, Go, D, and JIT aren't assigned
   numbers either, so for now use 0.  */
28904 || ! strcmp (language_string
, "GNU GIMPLE")
28905 || ! strcmp (language_string
, "GNU Go")
28906 || ! strcmp (language_string
, "GNU D")
28907 || ! strcmp (language_string
, "libgccjit"))
28909 else if (! strcmp (language_string
, "GNU F77")
28910 || lang_GNU_Fortran ())
28912 else if (! strcmp (language_string
, "GNU Ada"))
28914 else if (lang_GNU_CXX ()
28915 || ! strcmp (language_string
, "GNU Objective-C++"))
28917 else if (! strcmp (language_string
, "GNU Java"))
28919 else if (! strcmp (language_string
, "GNU Objective-C"))
28922 gcc_unreachable ();
28923 fprintf (file
, "%d,", i
);
/* 8 single bit fields: global linkage (not set for C extern linkage,
   apparently a PL/I convention?), out-of-line epilogue/prologue, offset
   from start of procedure stored in tbtab, internal function, function
   has controlled storage, function has no toc, function uses fp,
   function logs/aborts fp operations.  */
/* Assume that fp operations are used if any fp reg must be saved.  */
28931 fprintf (file
, "%d,",
28932 (optional_tbtab
<< 5) | ((info
->first_fp_reg_save
!= 64) << 1));
/* 6 bitfields: function is interrupt handler, name present in
   proc table, function calls alloca, on condition directives
   (controls stack walks, 3 bits), saves condition reg, saves
   link reg.  */
/* The `function calls alloca' bit seems to be set whenever reg 31 is
   set up as a frame pointer, even when there is no alloca call.  */
28940 fprintf (file
, "%d,",
28941 ((optional_tbtab
<< 6)
28942 | ((optional_tbtab
& frame_pointer_needed
) << 5)
28943 | (info
->cr_save_p
<< 1)
28944 | (info
->lr_save_p
)));
/* 3 bitfields: saves backchain, fixup code, number of fpr saved
   (6 bits).  */
28948 fprintf (file
, "%d,",
28949 (info
->push_p
<< 7) | (64 - info
->first_fp_reg_save
));
28951 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28952 fprintf (file
, "%d,", (32 - first_reg_to_save ()));
28954 if (optional_tbtab
)
28956 /* Compute the parameter info from the function decl argument
28959 int next_parm_info_bit
= 31;
28961 for (decl
= DECL_ARGUMENTS (current_function_decl
);
28962 decl
; decl
= DECL_CHAIN (decl
))
28964 rtx parameter
= DECL_INCOMING_RTL (decl
);
28965 machine_mode mode
= GET_MODE (parameter
);
28967 if (GET_CODE (parameter
) == REG
)
28969 if (SCALAR_FLOAT_MODE_P (mode
))
28992 gcc_unreachable ();
28995 /* If only one bit will fit, don't or in this entry. */
28996 if (next_parm_info_bit
> 0)
28997 parm_info
|= (bits
<< (next_parm_info_bit
- 1));
28998 next_parm_info_bit
-= 2;
29002 fixed_parms
+= ((GET_MODE_SIZE (mode
)
29003 + (UNITS_PER_WORD
- 1))
29005 next_parm_info_bit
-= 1;
/* Number of fixed point parameters.  */
/* This is actually the number of words of fixed point parameters; thus
   an 8 byte struct counts as 2; and thus the maximum value is 8.  */
29014 fprintf (file
, "%d,", fixed_parms
);
/* 2 bitfields: number of floating point parameters (7 bits), parameters
   on stack (1 bit).  */
/* This is actually the number of fp registers that hold parameters;
   and thus the maximum value is 13.  */
/* Set parameters on stack bit if parameters are not in their original
   registers, regardless of whether they are on the stack?  Xlc
   seems to set the bit when not optimizing.  */
29023 fprintf (file
, "%d\n", ((float_parms
<< 1) | (! optimize
)));
29025 if (optional_tbtab
)
/* Optional fields follow.  Some are variable length.  */
/* Parameter types, left adjusted bit fields: 0 fixed, 10 single
   float, 11 double float.  */
/* There is an entry for each parameter in a register, in the order
   that they occur in the parameter list.  Any intervening arguments
   on the stack are ignored.  If the list overflows a long (max
   possible length 34 bits) then completely leave off all elements
   that don't fit.  */
/* Only emit this long if there was at least one parameter.  */
29037 if (fixed_parms
|| float_parms
)
29038 fprintf (file
, "\t.long %d\n", parm_info
);
29040 /* Offset from start of code to tb table. */
29041 fputs ("\t.long ", file
);
29042 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
29043 RS6000_OUTPUT_BASENAME (file
, fname
);
29045 rs6000_output_function_entry (file
, fname
);
/* Interrupt handler mask.  */
/* Omit this long, since we never set the interrupt handler bit
   above.  */
/* Number of CTL (controlled storage) anchors.  */
/* Omit this long, since the has_ctl bit is never set above.  */
/* Displacement into stack of each CTL anchor.  */
/* Omit this list of longs, because there are no CTL anchors.  */
29058 /* Length of function name. */
29061 fprintf (file
, "\t.short %d\n", (int) strlen (fname
));
29063 /* Function name. */
29064 assemble_string (fname
, strlen (fname
));
/* Register for alloca automatic storage; this is always reg 31.
   Only emit this if the alloca bit was set above.  */
29068 if (frame_pointer_needed
)
29069 fputs ("\t.byte 31\n", file
);
29071 fputs ("\t.align 2\n", file
);
29075 /* Arrange to define .LCTOC1 label, if not already done. */
29079 if (!toc_initialized
)
29081 switch_to_section (toc_section
);
29082 switch_to_section (current_function_section ());
29087 /* -fsplit-stack support. */
29089 /* A SYMBOL_REF for __morestack. */
29090 static GTY(()) rtx morestack_ref
;
static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
/* Emit -fsplit-stack prologue, which goes before the regular function
   prologue (at local entry point in the case of ELFv2).  */
29105 rs6000_expand_split_stack_prologue (void)
29107 rs6000_stack_t
*info
= rs6000_stack_info ();
29108 unsigned HOST_WIDE_INT allocate
;
29109 long alloc_hi
, alloc_lo
;
29110 rtx r0
, r1
, r12
, lr
, ok_label
, compare
, jump
, call_fusage
;
29113 gcc_assert (flag_split_stack
&& reload_completed
);
29118 if (global_regs
[29])
29120 error ("%qs uses register r29", "-fsplit-stack");
29121 inform (DECL_SOURCE_LOCATION (global_regs_decl
[29]),
29122 "conflicts with %qD", global_regs_decl
[29]);
29125 allocate
= info
->total_size
;
29126 if (allocate
> (unsigned HOST_WIDE_INT
) 1 << 31)
29128 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29131 if (morestack_ref
== NULL_RTX
)
29133 morestack_ref
= gen_rtx_SYMBOL_REF (Pmode
, "__morestack");
29134 SYMBOL_REF_FLAGS (morestack_ref
) |= (SYMBOL_FLAG_LOCAL
29135 | SYMBOL_FLAG_FUNCTION
);
29138 r0
= gen_rtx_REG (Pmode
, 0);
29139 r1
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
29140 r12
= gen_rtx_REG (Pmode
, 12);
29141 emit_insn (gen_load_split_stack_limit (r0
));
29142 /* Always emit two insns here to calculate the requested stack,
29143 so that the linker can edit them when adjusting size for calling
29144 non-split-stack code. */
29145 alloc_hi
= (-allocate
+ 0x8000) & ~0xffffL
;
29146 alloc_lo
= -allocate
- alloc_hi
;
29149 emit_insn (gen_add3_const (r12
, r1
, alloc_hi
));
29151 emit_insn (gen_add3_const (r12
, r12
, alloc_lo
));
29153 emit_insn (gen_nop ());
29157 emit_insn (gen_add3_const (r12
, r1
, alloc_lo
));
29158 emit_insn (gen_nop ());
29161 compare
= gen_rtx_REG (CCUNSmode
, CR7_REGNO
);
29162 emit_insn (gen_rtx_SET (compare
, gen_rtx_COMPARE (CCUNSmode
, r12
, r0
)));
29163 ok_label
= gen_label_rtx ();
29164 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
29165 gen_rtx_GEU (VOIDmode
, compare
, const0_rtx
),
29166 gen_rtx_LABEL_REF (VOIDmode
, ok_label
),
29168 insn
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
29169 JUMP_LABEL (insn
) = ok_label
;
29170 /* Mark the jump as very likely to be taken. */
29171 add_reg_br_prob_note (insn
, profile_probability::very_likely ());
29173 lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
29174 insn
= emit_move_insn (r0
, lr
);
29175 RTX_FRAME_RELATED_P (insn
) = 1;
29176 insn
= emit_insn (gen_frame_store (r0
, r1
, info
->lr_save_offset
));
29177 RTX_FRAME_RELATED_P (insn
) = 1;
29179 insn
= emit_call_insn (gen_call (gen_rtx_MEM (SImode
, morestack_ref
),
29180 const0_rtx
, const0_rtx
));
29181 call_fusage
= NULL_RTX
;
29182 use_reg (&call_fusage
, r12
);
29183 /* Say the call uses r0, even though it doesn't, to stop regrename
29184 from twiddling with the insns saving lr, trashing args for cfun.
29185 The insns restoring lr are similarly protected by making
29186 split_stack_return use r0. */
29187 use_reg (&call_fusage
, r0
);
29188 add_function_usage_to (insn
, call_fusage
);
29189 /* Indicate that this function can't jump to non-local gotos. */
29190 make_reg_eh_region_note_nothrow_nononlocal (insn
);
29191 emit_insn (gen_frame_load (r0
, r1
, info
->lr_save_offset
));
29192 insn
= emit_move_insn (lr
, r0
);
29193 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
29194 RTX_FRAME_RELATED_P (insn
) = 1;
29195 emit_insn (gen_split_stack_return ());
29197 emit_label (ok_label
);
29198 LABEL_NUSES (ok_label
) = 1;
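
/* As a rough illustration only (the register numbers, TCB offset and frame
   size below are made-up examples, not values this function is guaranteed
   to emit), the 64-bit code generated above corresponds to something like:

	ld 0,-0x7000(13)	# load the split-stack limit from the TCB
	addis 12,1,0		# two linker-editable insns computing
	addi 12,12,-432		#   r12 = sp - frame size
	cmpld 7,12,0		# is there enough stack left?
	bge 7,.Lok		# very likely: skip the __morestack call
	mflr 0
	std 0,16(1)		# save LR in its regular slot
	bl __morestack		# let the runtime grow the stack
	ld 0,16(1)
	mtlr 0			# restore LR, return via split_stack_return
	blr
  .Lok:
   */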
/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
      && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
	  == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
	{
	  rtx pat;

	  cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
	  REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

	  /* Put the pseudo initialization right after the note at the
	     beginning of the function.  */
	  pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
			     gen_rtx_REG (Pmode, 12));
	  push_topmost_sequence ();
	  emit_insn_after (pat, get_insns ());
	  pop_topmost_sequence ();
	}

      rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
			       FIRST_PARM_OFFSET (current_function_decl));
      return copy_to_reg (ret);
    }

  return virtual_incoming_args_rtx;
}
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */

void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
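
/* Illustrative only: for a dynamic allocation of SIZE bytes this emits,
   before register allocation (so the names below stand for pseudo
   registers), roughly

	limit     = <split-stack limit loaded by gen_load_split_stack_limit>
	requested = sp - SIZE
	if (requested >=u limit)  goto LABEL	// still room, fast path

   and falls through to the caller's slow path when the unsigned compare
   fails.  */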
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
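
/* As a hedged illustration (not literal output of this file): for a thunk
   with DELTA == 16 and no vcall offset, the RTL emitted below boils down to

	addi 3,3,16		# adjust the incoming `this' pointer in r3
	b <function>		# sibling call; the return address is untouched

   and when VCALL_OFFSET is nonzero, r12 is used as a scratch register to
   load the vtable entry before the final adjustment.  */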
static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, funexp;
  rtx_insn *insn;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (3,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* A quick summary of the various types of 'constant-pool tables'
   found in rs6000:

   Target	Flags		Name		One table per

   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	contains:
				made by	addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option  */
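
/* For a concrete (purely illustrative) flavour of what these tables
   contain, output_toc below emits full-TOC AIX entries such as

	.tc FD_3ff00000_0[TC],0x3ff0000000000000	# the double 1.0

   where the symbol name encodes the constant's bits, while with
   -mminimal-toc or on ELF only an internal .LC label followed by a bare
   .long/.quad of the value is emitted.  The bytes shown above are an
   example, not output copied from a build.  */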
29421 /* Hash functions for the hash table. */
29424 rs6000_hash_constant (rtx k
)
29426 enum rtx_code code
= GET_CODE (k
);
29427 machine_mode mode
= GET_MODE (k
);
29428 unsigned result
= (code
<< 3) ^ mode
;
29429 const char *format
;
29432 format
= GET_RTX_FORMAT (code
);
29433 flen
= strlen (format
);
29439 return result
* 1231 + (unsigned) INSN_UID (XEXP (k
, 0));
29441 case CONST_WIDE_INT
:
29444 flen
= CONST_WIDE_INT_NUNITS (k
);
29445 for (i
= 0; i
< flen
; i
++)
29446 result
= result
* 613 + CONST_WIDE_INT_ELT (k
, i
);
29451 return real_hash (CONST_DOUBLE_REAL_VALUE (k
)) * result
;
29461 for (; fidx
< flen
; fidx
++)
29462 switch (format
[fidx
])
29467 const char *str
= XSTR (k
, fidx
);
29468 len
= strlen (str
);
29469 result
= result
* 613 + len
;
29470 for (i
= 0; i
< len
; i
++)
29471 result
= result
* 613 + (unsigned) str
[i
];
29476 result
= result
* 1231 + rs6000_hash_constant (XEXP (k
, fidx
));
29480 result
= result
* 613 + (unsigned) XINT (k
, fidx
);
29483 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT
))
29484 result
= result
* 613 + (unsigned) XWINT (k
, fidx
);
29488 for (i
= 0; i
< sizeof (HOST_WIDE_INT
) / sizeof (unsigned); i
++)
29489 result
= result
* 613 + (unsigned) (XWINT (k
, fidx
)
29496 gcc_unreachable ();
29503 toc_hasher::hash (toc_hash_struct
*thc
)
29505 return rs6000_hash_constant (thc
->key
) ^ thc
->key_mode
;
29508 /* Compare H1 and H2 for equivalence. */
29511 toc_hasher::equal (toc_hash_struct
*h1
, toc_hash_struct
*h2
)
29516 if (h1
->key_mode
!= h2
->key_mode
)
29519 return rtx_equal_p (r1
, r2
);
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
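
/* For example, the Itanium-ABI mangled names "_ZTV7MyClass" (vtable),
   "_ZTT7MyClass" (VTT) and "_ZTI7MyClass" (typeinfo) all match, as do
   construction vtables starting with "_ZTC", while an ordinary symbol
   such as "foo" does not.  ("MyClass" is just an illustrative name.)  */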
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
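
/* For instance, on targets defining NO_DOLLAR_IN_LABEL a name such as
   "foo$bar$baz" comes back as "foo_bar_baz", while "foo" or "$foo"
   (no '$', or '$' only in the first position) is returned unchanged.  */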
29565 rs6000_output_symbol_ref (FILE *file
, rtx x
)
29567 const char *name
= XSTR (x
, 0);
29569 /* Currently C++ toc references to vtables can be emitted before it
29570 is decided whether the vtable is public or private. If this is
29571 the case, then the linker will eventually complain that there is
29572 a reference to an unknown section. Thus, for vtables only,
29573 we emit the TOC reference to reference the identifier and not the
29575 if (VTABLE_NAME_P (name
))
29577 RS6000_OUTPUT_BASENAME (file
, name
);
29580 assemble_name (file
, name
);
29583 /* Output a TOC entry. We derive the entry name from what is being
29587 output_toc (FILE *file
, rtx x
, int labelno
, machine_mode mode
)
29590 const char *name
= buf
;
29592 HOST_WIDE_INT offset
= 0;
29594 gcc_assert (!TARGET_NO_TOC
);
29596 /* When the linker won't eliminate them, don't output duplicate
29597 TOC entries (this happens on AIX if there is any kind of TOC,
29598 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29600 if (TARGET_TOC
&& GET_CODE (x
) != LABEL_REF
)
29602 struct toc_hash_struct
*h
;
29604 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29605 time because GGC is not initialized at that point. */
29606 if (toc_hash_table
== NULL
)
29607 toc_hash_table
= hash_table
<toc_hasher
>::create_ggc (1021);
29609 h
= ggc_alloc
<toc_hash_struct
> ();
29611 h
->key_mode
= mode
;
29612 h
->labelno
= labelno
;
29614 toc_hash_struct
**found
= toc_hash_table
->find_slot (h
, INSERT
);
29615 if (*found
== NULL
)
29617 else /* This is indeed a duplicate.
29618 Set this label equal to that label. */
29620 fputs ("\t.set ", file
);
29621 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LC");
29622 fprintf (file
, "%d,", labelno
);
29623 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LC");
29624 fprintf (file
, "%d\n", ((*found
)->labelno
));
29627 if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
29628 && (SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_GLOBAL_DYNAMIC
29629 || SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
))
29631 fputs ("\t.set ", file
);
29632 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LCM");
29633 fprintf (file
, "%d,", labelno
);
29634 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LCM");
29635 fprintf (file
, "%d\n", ((*found
)->labelno
));
29642 /* If we're going to put a double constant in the TOC, make sure it's
29643 aligned properly when strict alignment is on. */
29644 if ((CONST_DOUBLE_P (x
) || CONST_WIDE_INT_P (x
))
29645 && STRICT_ALIGNMENT
29646 && GET_MODE_BITSIZE (mode
) >= 64
29647 && ! (TARGET_NO_FP_IN_TOC
&& ! TARGET_MINIMAL_TOC
)) {
29648 ASM_OUTPUT_ALIGN (file
, 3);
29651 (*targetm
.asm_out
.internal_label
) (file
, "LC", labelno
);
29653 /* Handle FP constants specially. Note that if we have a minimal
29654 TOC, things we put here aren't actually in the TOC, so we can allow
29656 if (CONST_DOUBLE_P (x
)
29657 && (GET_MODE (x
) == TFmode
|| GET_MODE (x
) == TDmode
29658 || GET_MODE (x
) == IFmode
|| GET_MODE (x
) == KFmode
))
29662 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29663 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29665 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29669 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29670 fputs (DOUBLE_INT_ASM_OP
, file
);
29672 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29673 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29674 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29675 fprintf (file
, "0x%lx%08lx,0x%lx%08lx\n",
29676 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29677 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff,
29678 k
[WORDS_BIG_ENDIAN
? 2 : 3] & 0xffffffff,
29679 k
[WORDS_BIG_ENDIAN
? 3 : 2] & 0xffffffff);
29684 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29685 fputs ("\t.long ", file
);
29687 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29688 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29689 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29690 fprintf (file
, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29691 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29692 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29696 else if (CONST_DOUBLE_P (x
)
29697 && (GET_MODE (x
) == DFmode
|| GET_MODE (x
) == DDmode
))
29701 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29702 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29704 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29708 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29709 fputs (DOUBLE_INT_ASM_OP
, file
);
29711 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29712 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29713 fprintf (file
, "0x%lx%08lx\n",
29714 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29715 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff);
29720 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29721 fputs ("\t.long ", file
);
29723 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29724 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29725 fprintf (file
, "0x%lx,0x%lx\n",
29726 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29730 else if (CONST_DOUBLE_P (x
)
29731 && (GET_MODE (x
) == SFmode
|| GET_MODE (x
) == SDmode
))
29735 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29736 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29738 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29742 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29743 fputs (DOUBLE_INT_ASM_OP
, file
);
29745 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29746 if (WORDS_BIG_ENDIAN
)
29747 fprintf (file
, "0x%lx00000000\n", l
& 0xffffffff);
29749 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
29754 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29755 fputs ("\t.long ", file
);
29757 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29758 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
29762 else if (GET_MODE (x
) == VOIDmode
&& GET_CODE (x
) == CONST_INT
)
29764 unsigned HOST_WIDE_INT low
;
29765 HOST_WIDE_INT high
;
29767 low
= INTVAL (x
) & 0xffffffff;
29768 high
= (HOST_WIDE_INT
) INTVAL (x
) >> 32;
29770 /* TOC entries are always Pmode-sized, so when big-endian
29771 smaller integer constants in the TOC need to be padded.
29772 (This is still a win over putting the constants in
29773 a separate constant pool, because then we'd have
29774 to have both a TOC entry _and_ the actual constant.)
29776 For a 32-bit target, CONST_INT values are loaded and shifted
29777 entirely within `low' and can be stored in one TOC entry. */
29779 /* It would be easy to make this work, but it doesn't now. */
29780 gcc_assert (!TARGET_64BIT
|| POINTER_SIZE
>= GET_MODE_BITSIZE (mode
));
29782 if (WORDS_BIG_ENDIAN
&& POINTER_SIZE
> GET_MODE_BITSIZE (mode
))
29785 low
<<= POINTER_SIZE
- GET_MODE_BITSIZE (mode
);
29786 high
= (HOST_WIDE_INT
) low
>> 32;
29792 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29793 fputs (DOUBLE_INT_ASM_OP
, file
);
29795 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
29796 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29797 fprintf (file
, "0x%lx%08lx\n",
29798 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29803 if (POINTER_SIZE
< GET_MODE_BITSIZE (mode
))
29805 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29806 fputs ("\t.long ", file
);
29808 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
29809 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29810 fprintf (file
, "0x%lx,0x%lx\n",
29811 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29815 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29816 fputs ("\t.long ", file
);
29818 fprintf (file
, "\t.tc IS_%lx[TC],", (long) low
& 0xffffffff);
29819 fprintf (file
, "0x%lx\n", (long) low
& 0xffffffff);
29825 if (GET_CODE (x
) == CONST
)
29827 gcc_assert (GET_CODE (XEXP (x
, 0)) == PLUS
29828 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
);
29830 base
= XEXP (XEXP (x
, 0), 0);
29831 offset
= INTVAL (XEXP (XEXP (x
, 0), 1));
29834 switch (GET_CODE (base
))
29837 name
= XSTR (base
, 0);
29841 ASM_GENERATE_INTERNAL_LABEL (buf
, "L",
29842 CODE_LABEL_NUMBER (XEXP (base
, 0)));
29846 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (base
));
29850 gcc_unreachable ();
29853 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29854 fputs (TARGET_32BIT
? "\t.long " : DOUBLE_INT_ASM_OP
, file
);
29857 fputs ("\t.tc ", file
);
29858 RS6000_OUTPUT_BASENAME (file
, name
);
29861 fprintf (file
, ".N" HOST_WIDE_INT_PRINT_UNSIGNED
, - offset
);
29863 fprintf (file
, ".P" HOST_WIDE_INT_PRINT_UNSIGNED
, offset
);
29865 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29866 after other TOC symbols, reducing overflow of small TOC access
29867 to [TC] symbols. */
29868 fputs (TARGET_XCOFF
&& TARGET_CMODEL
!= CMODEL_SMALL
29869 ? "[TE]," : "[TC],", file
);
29872 /* Currently C++ toc references to vtables can be emitted before it
29873 is decided whether the vtable is public or private. If this is
29874 the case, then the linker will eventually complain that there is
29875 a TOC reference to an unknown section. Thus, for vtables only,
29876 we emit the TOC reference to reference the symbol and not the
29878 if (VTABLE_NAME_P (name
))
29880 RS6000_OUTPUT_BASENAME (file
, name
);
29882 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
29883 else if (offset
> 0)
29884 fprintf (file
, "+" HOST_WIDE_INT_PRINT_DEC
, offset
);
29887 output_addr_const (file
, x
);
29890 if (TARGET_XCOFF
&& GET_CODE (base
) == SYMBOL_REF
)
29892 switch (SYMBOL_REF_TLS_MODEL (base
))
29896 case TLS_MODEL_LOCAL_EXEC
:
29897 fputs ("@le", file
);
29899 case TLS_MODEL_INITIAL_EXEC
:
29900 fputs ("@ie", file
);
29902 /* Use global-dynamic for local-dynamic. */
29903 case TLS_MODEL_GLOBAL_DYNAMIC
:
29904 case TLS_MODEL_LOCAL_DYNAMIC
:
29906 (*targetm
.asm_out
.internal_label
) (file
, "LCM", labelno
);
29907 fputs ("\t.tc .", file
);
29908 RS6000_OUTPUT_BASENAME (file
, name
);
29909 fputs ("[TC],", file
);
29910 output_addr_const (file
, x
);
29911 fputs ("@m", file
);
29914 gcc_unreachable ();
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */
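
/* For example (illustrative, exact batching aside), the string "ab\n"
   is written as

	.byte "ab"
	.byte 10

   printable characters go inside a quoted .byte string (with '"' doubled
   to escape it) and everything else is emitted as a decimal byte value.  */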
29931 output_ascii (FILE *file
, const char *p
, int n
)
29934 int i
, count_string
;
29935 const char *for_string
= "\t.byte \"";
29936 const char *for_decimal
= "\t.byte ";
29937 const char *to_close
= NULL
;
29940 for (i
= 0; i
< n
; i
++)
29943 if (c
>= ' ' && c
< 0177)
29946 fputs (for_string
, file
);
29949 /* Write two quotes to get one. */
29957 for_decimal
= "\"\n\t.byte ";
29961 if (count_string
>= 512)
29963 fputs (to_close
, file
);
29965 for_string
= "\t.byte \"";
29966 for_decimal
= "\t.byte ";
29974 fputs (for_decimal
, file
);
29975 fprintf (file
, "%d", c
);
29977 for_string
= "\n\t.byte \"";
29978 for_decimal
= ", ";
29984 /* Now close the string if we have written one. Then end the line. */
29986 fputs (to_close
, file
);
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   FILENAME.  */
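
/* So, for example, a FILENAME of "dir/foo.c" together with a SECTION_DESC
   of "data" would produce a name of the form "_foo" immediately followed
   by "data" (illustrative values; only alphanumeric characters of the
   stripped filename survive).  */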
30002 rs6000_gen_section_name (char **buf
, const char *filename
,
30003 const char *section_desc
)
30005 const char *q
, *after_last_slash
, *last_period
= 0;
30009 after_last_slash
= filename
;
30010 for (q
= filename
; *q
; q
++)
30013 after_last_slash
= q
+ 1;
30014 else if (*q
== '.')
30018 len
= strlen (after_last_slash
) + strlen (section_desc
) + 2;
30019 *buf
= (char *) xmalloc (len
);
30024 for (q
= after_last_slash
; *q
; q
++)
30026 if (q
== last_period
)
30028 strcpy (p
, section_desc
);
30029 p
+= strlen (section_desc
);
30033 else if (ISALNUM (*q
))
30037 if (last_period
== 0)
30038 strcpy (p
, section_desc
);
30043 /* Emit profile function. */
30046 output_profile_hook (int labelno ATTRIBUTE_UNUSED
)
30048 /* Non-standard profiling for kernels, which just saves LR then calls
30049 _mcount without worrying about arg saves. The idea is to change
30050 the function prologue as little as possible as it isn't easy to
30051 account for arg save/restore code added just for _mcount. */
30052 if (TARGET_PROFILE_KERNEL
)
30055 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
30057 #ifndef NO_PROFILE_COUNTERS
30058 # define NO_PROFILE_COUNTERS 0
30060 if (NO_PROFILE_COUNTERS
)
30061 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
30062 LCT_NORMAL
, VOIDmode
);
30066 const char *label_name
;
30069 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
30070 label_name
= ggc_strdup ((*targetm
.strip_name_encoding
) (buf
));
30071 fun
= gen_rtx_SYMBOL_REF (Pmode
, label_name
);
30073 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
30074 LCT_NORMAL
, VOIDmode
, fun
, Pmode
);
30077 else if (DEFAULT_ABI
== ABI_DARWIN
)
30079 const char *mcount_name
= RS6000_MCOUNT
;
30080 int caller_addr_regno
= LR_REGNO
;
30082 /* Be conservative and always set this, at least for now. */
30083 crtl
->uses_pic_offset_table
= 1;
30086 /* For PIC code, set up a stub and collect the caller's address
30087 from r0, which is where the prologue puts it. */
30088 if (MACHOPIC_INDIRECT
30089 && crtl
->uses_pic_offset_table
)
30090 caller_addr_regno
= 0;
30092 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, mcount_name
),
30093 LCT_NORMAL
, VOIDmode
,
30094 gen_rtx_REG (Pmode
, caller_addr_regno
), Pmode
);
30098 /* Write function profiler code. */
30101 output_function_profiler (FILE *file
, int labelno
)
30105 switch (DEFAULT_ABI
)
30108 gcc_unreachable ();
30113 warning (0, "no profiling of 64-bit code for this ABI");
30116 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
30117 fprintf (file
, "\tmflr %s\n", reg_names
[0]);
30118 if (NO_PROFILE_COUNTERS
)
30120 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30121 reg_names
[0], reg_names
[1]);
30123 else if (TARGET_SECURE_PLT
&& flag_pic
)
30125 if (TARGET_LINK_STACK
)
30128 get_ppc476_thunk_name (name
);
30129 asm_fprintf (file
, "\tbl %s\n", name
);
30132 asm_fprintf (file
, "\tbcl 20,31,1f\n1:\n");
30133 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30134 reg_names
[0], reg_names
[1]);
30135 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30136 asm_fprintf (file
, "\taddis %s,%s,",
30137 reg_names
[12], reg_names
[12]);
30138 assemble_name (file
, buf
);
30139 asm_fprintf (file
, "-1b@ha\n\tla %s,", reg_names
[0]);
30140 assemble_name (file
, buf
);
30141 asm_fprintf (file
, "-1b@l(%s)\n", reg_names
[12]);
30143 else if (flag_pic
== 1)
30145 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file
);
30146 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30147 reg_names
[0], reg_names
[1]);
30148 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30149 asm_fprintf (file
, "\tlwz %s,", reg_names
[0]);
30150 assemble_name (file
, buf
);
30151 asm_fprintf (file
, "@got(%s)\n", reg_names
[12]);
30153 else if (flag_pic
> 1)
30155 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30156 reg_names
[0], reg_names
[1]);
30157 /* Now, we need to get the address of the label. */
30158 if (TARGET_LINK_STACK
)
30161 get_ppc476_thunk_name (name
);
30162 asm_fprintf (file
, "\tbl %s\n\tb 1f\n\t.long ", name
);
30163 assemble_name (file
, buf
);
30164 fputs ("-.\n1:", file
);
30165 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30166 asm_fprintf (file
, "\taddi %s,%s,4\n",
30167 reg_names
[11], reg_names
[11]);
30171 fputs ("\tbcl 20,31,1f\n\t.long ", file
);
30172 assemble_name (file
, buf
);
30173 fputs ("-.\n1:", file
);
30174 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30176 asm_fprintf (file
, "\tlwz %s,0(%s)\n",
30177 reg_names
[0], reg_names
[11]);
30178 asm_fprintf (file
, "\tadd %s,%s,%s\n",
30179 reg_names
[0], reg_names
[0], reg_names
[11]);
30183 asm_fprintf (file
, "\tlis %s,", reg_names
[12]);
30184 assemble_name (file
, buf
);
30185 fputs ("@ha\n", file
);
30186 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30187 reg_names
[0], reg_names
[1]);
30188 asm_fprintf (file
, "\tla %s,", reg_names
[0]);
30189 assemble_name (file
, buf
);
30190 asm_fprintf (file
, "@l(%s)\n", reg_names
[12]);
30193 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30194 fprintf (file
, "\tbl %s%s\n",
30195 RS6000_MCOUNT
, flag_pic
? "@plt" : "");
30201 /* Don't do anything, done in output_profile_hook (). */
30208 /* The following variable value is the last issued insn. */
30210 static rtx_insn
*last_scheduled_insn
;
30212 /* The following variable helps to balance issuing of load and
30213 store instructions */
30215 static int load_store_pendulum
;
30217 /* The following variable helps pair divide insns during scheduling. */
30218 static int divide_cnt
;
30219 /* The following variable helps pair and alternate vector and vector load
30220 insns during scheduling. */
30221 static int vec_pairing
;
30224 /* Power4 load update and store update instructions are cracked into a
30225 load or store and an integer insn which are executed in the same cycle.
30226 Branches have their own dispatch slot which does not count against the
30227 GCC issue rate, but it changes the program flow so there are no other
30228 instructions to issue in this cycle. */
30231 rs6000_variable_issue_1 (rtx_insn
*insn
, int more
)
30233 last_scheduled_insn
= insn
;
30234 if (GET_CODE (PATTERN (insn
)) == USE
30235 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30237 cached_can_issue_more
= more
;
30238 return cached_can_issue_more
;
30241 if (insn_terminates_group_p (insn
, current_group
))
30243 cached_can_issue_more
= 0;
30244 return cached_can_issue_more
;
30247 /* If no reservation, but reach here */
30248 if (recog_memoized (insn
) < 0)
30251 if (rs6000_sched_groups
)
30253 if (is_microcoded_insn (insn
))
30254 cached_can_issue_more
= 0;
30255 else if (is_cracked_insn (insn
))
30256 cached_can_issue_more
= more
> 2 ? more
- 2 : 0;
30258 cached_can_issue_more
= more
- 1;
30260 return cached_can_issue_more
;
30263 if (rs6000_tune
== PROCESSOR_CELL
&& is_nonpipeline_insn (insn
))
30266 cached_can_issue_more
= more
- 1;
30267 return cached_can_issue_more
;
30271 rs6000_variable_issue (FILE *stream
, int verbose
, rtx_insn
*insn
, int more
)
30273 int r
= rs6000_variable_issue_1 (insn
, more
);
30275 fprintf (stream
, "// rs6000_variable_issue (more = %d) = %d\n", more
, r
);
30279 /* Adjust the cost of a scheduling dependency. Return the new cost of
30280 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30283 rs6000_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
, int cost
,
30286 enum attr_type attr_type
;
30288 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
30295 /* Data dependency; DEP_INSN writes a register that INSN reads
30296 some cycles later. */
30298 /* Separate a load from a narrower, dependent store. */
30299 if ((rs6000_sched_groups
|| rs6000_tune
== PROCESSOR_POWER9
)
30300 && GET_CODE (PATTERN (insn
)) == SET
30301 && GET_CODE (PATTERN (dep_insn
)) == SET
30302 && GET_CODE (XEXP (PATTERN (insn
), 1)) == MEM
30303 && GET_CODE (XEXP (PATTERN (dep_insn
), 0)) == MEM
30304 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn
), 1)))
30305 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn
), 0)))))
30308 attr_type
= get_attr_type (insn
);
30313 /* Tell the first scheduling pass about the latency between
30314 a mtctr and bctr (and mtlr and br/blr). The first
30315 scheduling pass will not know about this latency since
30316 the mtctr instruction, which has the latency associated
30317 to it, will be generated by reload. */
30320 /* Leave some extra cycles between a compare and its
30321 dependent branch, to inhibit expensive mispredicts. */
30322 if ((rs6000_tune
== PROCESSOR_PPC603
30323 || rs6000_tune
== PROCESSOR_PPC604
30324 || rs6000_tune
== PROCESSOR_PPC604e
30325 || rs6000_tune
== PROCESSOR_PPC620
30326 || rs6000_tune
== PROCESSOR_PPC630
30327 || rs6000_tune
== PROCESSOR_PPC750
30328 || rs6000_tune
== PROCESSOR_PPC7400
30329 || rs6000_tune
== PROCESSOR_PPC7450
30330 || rs6000_tune
== PROCESSOR_PPCE5500
30331 || rs6000_tune
== PROCESSOR_PPCE6500
30332 || rs6000_tune
== PROCESSOR_POWER4
30333 || rs6000_tune
== PROCESSOR_POWER5
30334 || rs6000_tune
== PROCESSOR_POWER7
30335 || rs6000_tune
== PROCESSOR_POWER8
30336 || rs6000_tune
== PROCESSOR_POWER9
30337 || rs6000_tune
== PROCESSOR_CELL
)
30338 && recog_memoized (dep_insn
)
30339 && (INSN_CODE (dep_insn
) >= 0))
30341 switch (get_attr_type (dep_insn
))
30344 case TYPE_FPCOMPARE
:
30345 case TYPE_CR_LOGICAL
:
30349 if (get_attr_dot (dep_insn
) == DOT_YES
)
30354 if (get_attr_dot (dep_insn
) == DOT_YES
30355 && get_attr_var_shift (dep_insn
) == VAR_SHIFT_NO
)
30366 if ((rs6000_tune
== PROCESSOR_POWER6
)
30367 && recog_memoized (dep_insn
)
30368 && (INSN_CODE (dep_insn
) >= 0))
30371 if (GET_CODE (PATTERN (insn
)) != SET
)
30372 /* If this happens, we have to extend this to schedule
30373 optimally. Return default for now. */
30376 /* Adjust the cost for the case where the value written
30377 by a fixed point operation is used as the address
30378 gen value on a store. */
30379 switch (get_attr_type (dep_insn
))
30384 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30385 return get_attr_sign_extend (dep_insn
)
30386 == SIGN_EXTEND_YES
? 6 : 4;
30391 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30392 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30402 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30410 if (get_attr_update (dep_insn
) == UPDATE_YES
30411 && ! rs6000_store_data_bypass_p (dep_insn
, insn
))
30417 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30423 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30424 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30434 if ((rs6000_tune
== PROCESSOR_POWER6
)
30435 && recog_memoized (dep_insn
)
30436 && (INSN_CODE (dep_insn
) >= 0))
30439 /* Adjust the cost for the case where the value written
30440 by a fixed point instruction is used within the address
30441 gen portion of a subsequent load(u)(x) */
30442 switch (get_attr_type (dep_insn
))
30447 if (set_to_load_agen (dep_insn
, insn
))
30448 return get_attr_sign_extend (dep_insn
)
30449 == SIGN_EXTEND_YES
? 6 : 4;
30454 if (set_to_load_agen (dep_insn
, insn
))
30455 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30465 if (set_to_load_agen (dep_insn
, insn
))
30473 if (get_attr_update (dep_insn
) == UPDATE_YES
30474 && set_to_load_agen (dep_insn
, insn
))
30480 if (set_to_load_agen (dep_insn
, insn
))
30486 if (set_to_load_agen (dep_insn
, insn
))
30487 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30497 if ((rs6000_tune
== PROCESSOR_POWER6
)
30498 && get_attr_update (insn
) == UPDATE_NO
30499 && recog_memoized (dep_insn
)
30500 && (INSN_CODE (dep_insn
) >= 0)
30501 && (get_attr_type (dep_insn
) == TYPE_MFFGPR
))
30508 /* Fall out to return default cost. */
30512 case REG_DEP_OUTPUT
:
30513 /* Output dependency; DEP_INSN writes a register that INSN writes some
30515 if ((rs6000_tune
== PROCESSOR_POWER6
)
30516 && recog_memoized (dep_insn
)
30517 && (INSN_CODE (dep_insn
) >= 0))
30519 attr_type
= get_attr_type (insn
);
30524 case TYPE_FPSIMPLE
:
30525 if (get_attr_type (dep_insn
) == TYPE_FP
30526 || get_attr_type (dep_insn
) == TYPE_FPSIMPLE
)
30530 if (get_attr_update (insn
) == UPDATE_NO
30531 && get_attr_type (dep_insn
) == TYPE_MFFGPR
)
30538 /* Fall through, no cost for output dependency. */
30542 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30547 gcc_unreachable ();
30553 /* Debug version of rs6000_adjust_cost. */
30556 rs6000_debug_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
,
30557 int cost
, unsigned int dw
)
30559 int ret
= rs6000_adjust_cost (insn
, dep_type
, dep_insn
, cost
, dw
);
    default:		 dep = "unknown dependency"; break;
    case REG_DEP_TRUE:	 dep = "data dependency";    break;
    case REG_DEP_OUTPUT: dep = "output dependency";  break;
    case REG_DEP_ANTI:	 dep = "anti dependency";    break;
30574 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30575 "%s, insn:\n", ret
, cost
, dep
);
/* Return true if INSN is microcoded, and false otherwise.  */
30587 is_microcoded_insn (rtx_insn
*insn
)
30589 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30590 || GET_CODE (PATTERN (insn
)) == USE
30591 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30594 if (rs6000_tune
== PROCESSOR_CELL
)
30595 return get_attr_cell_micro (insn
) == CELL_MICRO_ALWAYS
;
30597 if (rs6000_sched_groups
30598 && (rs6000_tune
== PROCESSOR_POWER4
|| rs6000_tune
== PROCESSOR_POWER5
))
30600 enum attr_type type
= get_attr_type (insn
);
30601 if ((type
== TYPE_LOAD
30602 && get_attr_update (insn
) == UPDATE_YES
30603 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
)
30604 || ((type
== TYPE_LOAD
|| type
== TYPE_STORE
)
30605 && get_attr_update (insn
) == UPDATE_YES
30606 && get_attr_indexed (insn
) == INDEXED_YES
)
30607 || type
== TYPE_MFCR
)
30614 /* The function returns true if INSN is cracked into 2 instructions
30615 by the processor (and therefore occupies 2 issue slots). */
30618 is_cracked_insn (rtx_insn
*insn
)
30620 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30621 || GET_CODE (PATTERN (insn
)) == USE
30622 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30625 if (rs6000_sched_groups
30626 && (rs6000_tune
== PROCESSOR_POWER4
|| rs6000_tune
== PROCESSOR_POWER5
))
30628 enum attr_type type
= get_attr_type (insn
);
30629 if ((type
== TYPE_LOAD
30630 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
30631 && get_attr_update (insn
) == UPDATE_NO
)
30632 || (type
== TYPE_LOAD
30633 && get_attr_sign_extend (insn
) == SIGN_EXTEND_NO
30634 && get_attr_update (insn
) == UPDATE_YES
30635 && get_attr_indexed (insn
) == INDEXED_NO
)
30636 || (type
== TYPE_STORE
30637 && get_attr_update (insn
) == UPDATE_YES
30638 && get_attr_indexed (insn
) == INDEXED_NO
)
30639 || ((type
== TYPE_FPLOAD
|| type
== TYPE_FPSTORE
)
30640 && get_attr_update (insn
) == UPDATE_YES
)
30641 || (type
== TYPE_CR_LOGICAL
30642 && get_attr_cr_logical_3op (insn
) == CR_LOGICAL_3OP_YES
)
30643 || (type
== TYPE_EXTS
30644 && get_attr_dot (insn
) == DOT_YES
)
30645 || (type
== TYPE_SHIFT
30646 && get_attr_dot (insn
) == DOT_YES
30647 && get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
30648 || (type
== TYPE_MUL
30649 && get_attr_dot (insn
) == DOT_YES
)
30650 || type
== TYPE_DIV
30651 || (type
== TYPE_INSERT
30652 && get_attr_size (insn
) == SIZE_32
))
30659 /* The function returns true if INSN can be issued only from
30660 the branch slot. */
30663 is_branch_slot_insn (rtx_insn
*insn
)
30665 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30666 || GET_CODE (PATTERN (insn
)) == USE
30667 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30670 if (rs6000_sched_groups
)
30672 enum attr_type type
= get_attr_type (insn
);
30673 if (type
== TYPE_BRANCH
|| type
== TYPE_JMPREG
)
/* Return true if OUT_INSN sets a value that is used in the address
   generation computation of IN_INSN.  */
30684 set_to_load_agen (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
30686 rtx out_set
, in_set
;
30688 /* For performance reasons, only handle the simple case where
30689 both loads are a single_set. */
30690 out_set
= single_set (out_insn
);
30693 in_set
= single_set (in_insn
);
30695 return reg_mentioned_p (SET_DEST (out_set
), SET_SRC (in_set
));
30701 /* Try to determine base/offset/size parts of the given MEM.
30702 Return true if successful, false if all the values couldn't
30705 This function only looks for REG or REG+CONST address forms.
30706 REG+REG address form will return false. */
30709 get_memref_parts (rtx mem
, rtx
*base
, HOST_WIDE_INT
*offset
,
30710 HOST_WIDE_INT
*size
)
30713 if MEM_SIZE_KNOWN_P (mem
)
30714 *size
= MEM_SIZE (mem
);
30718 addr_rtx
= (XEXP (mem
, 0));
30719 if (GET_CODE (addr_rtx
) == PRE_MODIFY
)
30720 addr_rtx
= XEXP (addr_rtx
, 1);
30723 while (GET_CODE (addr_rtx
) == PLUS
30724 && CONST_INT_P (XEXP (addr_rtx
, 1)))
30726 *offset
+= INTVAL (XEXP (addr_rtx
, 1));
30727 addr_rtx
= XEXP (addr_rtx
, 0);
30729 if (!REG_P (addr_rtx
))
/* Return true if the target storage location of MEM1 is adjacent to
   the target storage location of MEM2.  */
30741 adjacent_mem_locations (rtx mem1
, rtx mem2
)
30744 HOST_WIDE_INT off1
, size1
, off2
, size2
;
30746 if (get_memref_parts (mem1
, ®1
, &off1
, &size1
)
30747 && get_memref_parts (mem2
, ®2
, &off2
, &size2
))
30748 return ((REGNO (reg1
) == REGNO (reg2
))
30749 && ((off1
+ size1
== off2
)
30750 || (off2
+ size2
== off1
)));
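
/* For example, two 8-byte accesses at 32(r9) and 40(r9) are adjacent
   (off1 + size1 == off2), whereas 32(r9) and 48(r9), or 32(r9) and
   32(r10), are not.  */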
30755 /* This function returns true if it can be determined that the two MEM
30756 locations overlap by at least 1 byte based on base reg/offset/size. */
30759 mem_locations_overlap (rtx mem1
, rtx mem2
)
30762 HOST_WIDE_INT off1
, size1
, off2
, size2
;
30764 if (get_memref_parts (mem1
, ®1
, &off1
, &size1
)
30765 && get_memref_parts (mem2
, ®2
, &off2
, &size2
))
30766 return ((REGNO (reg1
) == REGNO (reg2
))
30767 && (((off1
<= off2
) && (off1
+ size1
> off2
))
30768 || ((off2
<= off1
) && (off2
+ size2
> off1
))));
30773 /* A C statement (sans semicolon) to update the integer scheduling
30774 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30775 INSN earlier, reduce the priority to execute INSN later. Do not
30776 define this macro if you do not need to adjust the scheduling
30777 priorities of insns. */
30780 rs6000_adjust_priority (rtx_insn
*insn ATTRIBUTE_UNUSED
, int priority
)
30782 rtx load_mem
, str_mem
;
30783 /* On machines (like the 750) which have asymmetric integer units,
30784 where one integer unit can do multiply and divides and the other
30785 can't, reduce the priority of multiply/divide so it is scheduled
30786 before other integer operations. */
30789 if (! INSN_P (insn
))
30792 if (GET_CODE (PATTERN (insn
)) == USE
)
30795 switch (rs6000_tune
) {
30796 case PROCESSOR_PPC750
:
30797 switch (get_attr_type (insn
))
30804 fprintf (stderr
, "priority was %#x (%d) before adjustment\n",
30805 priority
, priority
);
30806 if (priority
>= 0 && priority
< 0x01000000)
30813 if (insn_must_be_first_in_group (insn
)
30814 && reload_completed
30815 && current_sched_info
->sched_max_insns_priority
30816 && rs6000_sched_restricted_insns_priority
)
30819 /* Prioritize insns that can be dispatched only in the first
30821 if (rs6000_sched_restricted_insns_priority
== 1)
30822 /* Attach highest priority to insn. This means that in
30823 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30824 precede 'priority' (critical path) considerations. */
30825 return current_sched_info
->sched_max_insns_priority
;
30826 else if (rs6000_sched_restricted_insns_priority
== 2)
30827 /* Increase priority of insn by a minimal amount. This means that in
30828 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30829 considerations precede dispatch-slot restriction considerations. */
30830 return (priority
+ 1);
30833 if (rs6000_tune
== PROCESSOR_POWER6
30834 && ((load_store_pendulum
== -2 && is_load_insn (insn
, &load_mem
))
30835 || (load_store_pendulum
== 2 && is_store_insn (insn
, &str_mem
))))
30836 /* Attach highest priority to insn if the scheduler has just issued two
30837 stores and this instruction is a load, or two loads and this instruction
30838 is a store. Power6 wants loads and stores scheduled alternately
30840 return current_sched_info
->sched_max_insns_priority
;
30845 /* Return true if the instruction is nonpipelined on the Cell. */
30847 is_nonpipeline_insn (rtx_insn
*insn
)
30849 enum attr_type type
;
30850 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30851 || GET_CODE (PATTERN (insn
)) == USE
30852 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30855 type
= get_attr_type (insn
);
30856 if (type
== TYPE_MUL
30857 || type
== TYPE_DIV
30858 || type
== TYPE_SDIV
30859 || type
== TYPE_DDIV
30860 || type
== TYPE_SSQRT
30861 || type
== TYPE_DSQRT
30862 || type
== TYPE_MFCR
30863 || type
== TYPE_MFCRF
30864 || type
== TYPE_MFJMPR
)
30872 /* Return how many instructions the machine can issue per cycle. */
30875 rs6000_issue_rate (void)
30877 /* Unless scheduling for register pressure, use issue rate of 1 for
30878 first scheduling pass to decrease degradation. */
30879 if (!reload_completed
&& !flag_sched_pressure
)
30882 switch (rs6000_tune
) {
30883 case PROCESSOR_RS64A
:
30884 case PROCESSOR_PPC601
: /* ? */
30885 case PROCESSOR_PPC7450
:
30887 case PROCESSOR_PPC440
:
30888 case PROCESSOR_PPC603
:
30889 case PROCESSOR_PPC750
:
30890 case PROCESSOR_PPC7400
:
30891 case PROCESSOR_PPC8540
:
30892 case PROCESSOR_PPC8548
:
30893 case PROCESSOR_CELL
:
30894 case PROCESSOR_PPCE300C2
:
30895 case PROCESSOR_PPCE300C3
:
30896 case PROCESSOR_PPCE500MC
:
30897 case PROCESSOR_PPCE500MC64
:
30898 case PROCESSOR_PPCE5500
:
30899 case PROCESSOR_PPCE6500
:
30900 case PROCESSOR_TITAN
:
30902 case PROCESSOR_PPC476
:
30903 case PROCESSOR_PPC604
:
30904 case PROCESSOR_PPC604e
:
30905 case PROCESSOR_PPC620
:
30906 case PROCESSOR_PPC630
:
30908 case PROCESSOR_POWER4
:
30909 case PROCESSOR_POWER5
:
30910 case PROCESSOR_POWER6
:
30911 case PROCESSOR_POWER7
:
30913 case PROCESSOR_POWER8
:
30915 case PROCESSOR_POWER9
:
30922 /* Return how many instructions to look ahead for better insn
30926 rs6000_use_sched_lookahead (void)
30928 switch (rs6000_tune
)
30930 case PROCESSOR_PPC8540
:
30931 case PROCESSOR_PPC8548
:
30934 case PROCESSOR_CELL
:
30935 return (reload_completed
? 8 : 0);
30942 /* We are choosing insn from the ready queue. Return zero if INSN can be
30945 rs6000_use_sched_lookahead_guard (rtx_insn
*insn
, int ready_index
)
30947 if (ready_index
== 0)
30950 if (rs6000_tune
!= PROCESSOR_CELL
)
30953 gcc_assert (insn
!= NULL_RTX
&& INSN_P (insn
));
30955 if (!reload_completed
30956 || is_nonpipeline_insn (insn
)
30957 || is_microcoded_insn (insn
))
30963 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30964 and return true. */
30967 find_mem_ref (rtx pat
, rtx
*mem_ref
)
30972 /* stack_tie does not produce any real memory traffic. */
30973 if (tie_operand (pat
, VOIDmode
))
30976 if (GET_CODE (pat
) == MEM
)
30982 /* Recursively process the pattern. */
30983 fmt
= GET_RTX_FORMAT (GET_CODE (pat
));
30985 for (i
= GET_RTX_LENGTH (GET_CODE (pat
)) - 1; i
>= 0; i
--)
30989 if (find_mem_ref (XEXP (pat
, i
), mem_ref
))
30992 else if (fmt
[i
] == 'E')
30993 for (j
= XVECLEN (pat
, i
) - 1; j
>= 0; j
--)
30995 if (find_mem_ref (XVECEXP (pat
, i
, j
), mem_ref
))
31003 /* Determine if PAT is a PATTERN of a load insn. */
31006 is_load_insn1 (rtx pat
, rtx
*load_mem
)
31008 if (!pat
|| pat
== NULL_RTX
)
31011 if (GET_CODE (pat
) == SET
)
31012 return find_mem_ref (SET_SRC (pat
), load_mem
);
31014 if (GET_CODE (pat
) == PARALLEL
)
31018 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
31019 if (is_load_insn1 (XVECEXP (pat
, 0, i
), load_mem
))
31026 /* Determine if INSN loads from memory. */
31029 is_load_insn (rtx insn
, rtx
*load_mem
)
31031 if (!insn
|| !INSN_P (insn
))
31037 return is_load_insn1 (PATTERN (insn
), load_mem
);
31040 /* Determine if PAT is a PATTERN of a store insn. */
31043 is_store_insn1 (rtx pat
, rtx
*str_mem
)
31045 if (!pat
|| pat
== NULL_RTX
)
31048 if (GET_CODE (pat
) == SET
)
31049 return find_mem_ref (SET_DEST (pat
), str_mem
);
31051 if (GET_CODE (pat
) == PARALLEL
)
31055 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
31056 if (is_store_insn1 (XVECEXP (pat
, 0, i
), str_mem
))
31063 /* Determine if INSN stores to memory. */
31066 is_store_insn (rtx insn
, rtx
*str_mem
)
31068 if (!insn
|| !INSN_P (insn
))
31071 return is_store_insn1 (PATTERN (insn
), str_mem
);
31074 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31077 is_power9_pairable_vec_type (enum attr_type type
)
31081 case TYPE_VECSIMPLE
:
31082 case TYPE_VECCOMPLEX
:
31086 case TYPE_VECFLOAT
:
31088 case TYPE_VECDOUBLE
:
31096 /* Returns whether the dependence between INSN and NEXT is considered
31097 costly by the given target. */
31100 rs6000_is_costly_dependence (dep_t dep
, int cost
, int distance
)
31104 rtx load_mem
, str_mem
;
31106 /* If the flag is not enabled - no dependence is considered costly;
31107 allow all dependent insns in the same group.
31108 This is the most aggressive option. */
31109 if (rs6000_sched_costly_dep
== no_dep_costly
)
31112 /* If the flag is set to 1 - a dependence is always considered costly;
31113 do not allow dependent instructions in the same group.
31114 This is the most conservative option. */
31115 if (rs6000_sched_costly_dep
== all_deps_costly
)
31118 insn
= DEP_PRO (dep
);
31119 next
= DEP_CON (dep
);
31121 if (rs6000_sched_costly_dep
== store_to_load_dep_costly
31122 && is_load_insn (next
, &load_mem
)
31123 && is_store_insn (insn
, &str_mem
))
31124 /* Prevent load after store in the same group. */
31127 if (rs6000_sched_costly_dep
== true_store_to_load_dep_costly
31128 && is_load_insn (next
, &load_mem
)
31129 && is_store_insn (insn
, &str_mem
)
31130 && DEP_TYPE (dep
) == REG_DEP_TRUE
31131 && mem_locations_overlap(str_mem
, load_mem
))
31132 /* Prevent load after store in the same group if it is a true
31136 /* The flag is set to X; dependences with latency >= X are considered costly,
31137 and will not be scheduled in the same group. */
31138 if (rs6000_sched_costly_dep
<= max_dep_latency
31139 && ((cost
- distance
) >= (int)rs6000_sched_costly_dep
))
31145 /* Return the next insn after INSN that is found before TAIL is reached,
31146 skipping any "non-active" insns - insns that will not actually occupy
31147 an issue slot. Return NULL_RTX if such an insn is not found. */
31150 get_next_active_insn (rtx_insn
*insn
, rtx_insn
*tail
)
31152 if (insn
== NULL_RTX
|| insn
== tail
)
31157 insn
= NEXT_INSN (insn
);
31158 if (insn
== NULL_RTX
|| insn
== tail
)
31162 || JUMP_P (insn
) || JUMP_TABLE_DATA_P (insn
)
31163 || (NONJUMP_INSN_P (insn
)
31164 && GET_CODE (PATTERN (insn
)) != USE
31165 && GET_CODE (PATTERN (insn
)) != CLOBBER
31166 && INSN_CODE (insn
) != CODE_FOR_stack_tie
))
31172 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31175 power9_sched_reorder2 (rtx_insn
**ready
, int lastpos
)
31180 enum attr_type type
, type2
;
31182 type
= get_attr_type (last_scheduled_insn
);
31184 /* Try to issue fixed point divides back-to-back in pairs so they will be
31185 routed to separate execution units and execute in parallel. */
31186 if (type
== TYPE_DIV
&& divide_cnt
== 0)
31188 /* First divide has been scheduled. */
31191 /* Scan the ready list looking for another divide, if found move it
31192 to the end of the list so it is chosen next. */
31196 if (recog_memoized (ready
[pos
]) >= 0
31197 && get_attr_type (ready
[pos
]) == TYPE_DIV
)
31200 for (i
= pos
; i
< lastpos
; i
++)
31201 ready
[i
] = ready
[i
+ 1];
31202 ready
[lastpos
] = tmp
;
31210 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31213 /* The best dispatch throughput for vector and vector load insns can be
31214 achieved by interleaving a vector and vector load such that they'll
31215 dispatch to the same superslice. If this pairing cannot be achieved
31216 then it is best to pair vector insns together and vector load insns
     To aid in this pairing, vec_pairing maintains the current state with
     the following values:

	0  : Initial state, no vecload/vector pairing has been started.

	1  : A vecload or vector insn has been issued and a candidate for
	     pairing has been found and moved to the end of the ready
	     list.  */
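
  /* The ideal issue pattern is therefore an alternating stream such as
     vecload, vector, vecload, vector, ...; when no insn of the other kind
     is ready, two insns of the same kind are paired instead.  */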
31227 if (type
== TYPE_VECLOAD
)
31229 /* Issued a vecload. */
31230 if (vec_pairing
== 0)
31232 int vecload_pos
= -1;
31233 /* We issued a single vecload, look for a vector insn to pair it
31234 with. If one isn't found, try to pair another vecload. */
31238 if (recog_memoized (ready
[pos
]) >= 0)
31240 type2
= get_attr_type (ready
[pos
]);
31241 if (is_power9_pairable_vec_type (type2
))
31243 /* Found a vector insn to pair with, move it to the
31244 end of the ready list so it is scheduled next. */
31246 for (i
= pos
; i
< lastpos
; i
++)
31247 ready
[i
] = ready
[i
+ 1];
31248 ready
[lastpos
] = tmp
;
31250 return cached_can_issue_more
;
31252 else if (type2
== TYPE_VECLOAD
&& vecload_pos
== -1)
31253 /* Remember position of first vecload seen. */
31258 if (vecload_pos
>= 0)
31260 /* Didn't find a vector to pair with but did find a vecload,
31261 move it to the end of the ready list. */
31262 tmp
= ready
[vecload_pos
];
31263 for (i
= vecload_pos
; i
< lastpos
; i
++)
31264 ready
[i
] = ready
[i
+ 1];
31265 ready
[lastpos
] = tmp
;
31267 return cached_can_issue_more
;
31271 else if (is_power9_pairable_vec_type (type
))
31273 /* Issued a vector operation. */
31274 if (vec_pairing
== 0)
31277 /* We issued a single vector insn, look for a vecload to pair it
31278 with. If one isn't found, try to pair another vector. */
31282 if (recog_memoized (ready
[pos
]) >= 0)
31284 type2
= get_attr_type (ready
[pos
]);
31285 if (type2
== TYPE_VECLOAD
)
31287 /* Found a vecload insn to pair with, move it to the
31288 end of the ready list so it is scheduled next. */
31290 for (i
= pos
; i
< lastpos
; i
++)
31291 ready
[i
] = ready
[i
+ 1];
31292 ready
[lastpos
] = tmp
;
31294 return cached_can_issue_more
;
31296 else if (is_power9_pairable_vec_type (type2
)
31298 /* Remember position of first vector insn seen. */
31305 /* Didn't find a vecload to pair with but did find a vector
31306 insn, move it to the end of the ready list. */
31307 tmp
= ready
[vec_pos
];
31308 for (i
= vec_pos
; i
< lastpos
; i
++)
31309 ready
[i
] = ready
[i
+ 1];
31310 ready
[lastpos
] = tmp
;
31312 return cached_can_issue_more
;
31317 /* We've either finished a vec/vecload pair, couldn't find an insn to
31318 continue the current pair, or the last insn had nothing to do with
31319 with pairing. In any case, reset the state. */
31323 return cached_can_issue_more
;
31326 /* We are about to begin issuing insns for this clock cycle. */
31329 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED
, int sched_verbose
,
31330 rtx_insn
**ready ATTRIBUTE_UNUSED
,
31331 int *pn_ready ATTRIBUTE_UNUSED
,
31332 int clock_var ATTRIBUTE_UNUSED
)
31334 int n_ready
= *pn_ready
;
31337 fprintf (dump
, "// rs6000_sched_reorder :\n");
  /* Reorder the ready list, if the second to last ready insn
     is a nonpipeline insn.  */
31341 if (rs6000_tune
== PROCESSOR_CELL
&& n_ready
> 1)
31343 if (is_nonpipeline_insn (ready
[n_ready
- 1])
31344 && (recog_memoized (ready
[n_ready
- 2]) > 0))
31345 /* Simply swap first two insns. */
31346 std::swap (ready
[n_ready
- 1], ready
[n_ready
- 2]);
31349 if (rs6000_tune
== PROCESSOR_POWER6
)
31350 load_store_pendulum
= 0;
31352 return rs6000_issue_rate ();
31355 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31358 rs6000_sched_reorder2 (FILE *dump
, int sched_verbose
, rtx_insn
**ready
,
31359 int *pn_ready
, int clock_var ATTRIBUTE_UNUSED
)
31362 fprintf (dump
, "// rs6000_sched_reorder2 :\n");
31364 /* For Power6, we need to handle some special cases to try and keep the
31365 store queue from overflowing and triggering expensive flushes.
31367 This code monitors how load and store instructions are being issued
31368 and skews the ready list one way or the other to increase the likelihood
31369 that a desired instruction is issued at the proper time.
31371 A couple of things are done. First, we maintain a "load_store_pendulum"
31372 to track the current state of load/store issue.
31374 - If the pendulum is at zero, then no loads or stores have been
31375 issued in the current cycle so we do nothing.
31377 - If the pendulum is 1, then a single load has been issued in this
31378 cycle and we attempt to locate another load in the ready list to
31381 - If the pendulum is -2, then two stores have already been
     issued in this cycle, so we increase the priority of the first load
     in the ready list to increase its likelihood of being chosen first
31386 - If the pendulum is -1, then a single store has been issued in this
31387 cycle and we attempt to locate another store in the ready list to
31388 issue with it, preferring a store to an adjacent memory location to
31389 facilitate store pairing in the store queue.
31391 - If the pendulum is 2, then two loads have already been
31392 issued in this cycle, so we increase the priority of the first store
31393 in the ready list to increase it's likelihood of being chosen first
31396 - If the pendulum < -2 or > 2, then do nothing.
31398 Note: This code covers the most common scenarios. There exist non
31399 load/store instructions which make use of the LSU and which
31400 would need to be accounted for to strictly model the behavior
31401 of the machine. Those instructions are currently unaccounted
31402 for to help minimize compile time overhead of this code.
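  /* A worked example of the pendulum (illustrative only, not part of the
     implementation): each load issued in a cycle moves the pendulum +1 and
     each store moves it -1, so a cycle that has issued two loads leaves the
     pendulum at 2; at that point the first store on the ready list gets a
     priority bump so the store queue is more likely to start draining in
     the next cycle.  The mirror case (pendulum == -2) bumps the first load
     instead.  */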
31404 if (rs6000_tune
== PROCESSOR_POWER6
&& last_scheduled_insn
)
31409 rtx load_mem
, str_mem
;
31411 if (is_store_insn (last_scheduled_insn
, &str_mem
))
31412 /* Issuing a store, swing the load_store_pendulum to the left */
31413 load_store_pendulum
--;
31414 else if (is_load_insn (last_scheduled_insn
, &load_mem
))
31415 /* Issuing a load, swing the load_store_pendulum to the right */
31416 load_store_pendulum
++;
31418 return cached_can_issue_more
;
31420 /* If the pendulum is balanced, or there is only one instruction on
31421 the ready list, then all is well, so return. */
31422 if ((load_store_pendulum
== 0) || (*pn_ready
<= 1))
31423 return cached_can_issue_more
;
31425 if (load_store_pendulum
== 1)
31427 /* A load has been issued in this cycle. Scan the ready list
31428 for another load to issue with it */
31433 if (is_load_insn (ready
[pos
], &load_mem
))
	      /* Found a load.  Move it to the head of the ready list,
		 and adjust its priority so that it is more likely to
		 stay there.  */
31439 for (i
=pos
; i
<*pn_ready
-1; i
++)
31440 ready
[i
] = ready
[i
+ 1];
31441 ready
[*pn_ready
-1] = tmp
;
31443 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31444 INSN_PRIORITY (tmp
)++;
31450 else if (load_store_pendulum
== -2)
31452 /* Two stores have been issued in this cycle. Increase the
31453 priority of the first load in the ready list to favor it for
31454 issuing in the next cycle. */
31459 if (is_load_insn (ready
[pos
], &load_mem
)
31461 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31463 INSN_PRIORITY (ready
[pos
])++;
31465 /* Adjust the pendulum to account for the fact that a load
31466 was found and increased in priority. This is to prevent
31467 increasing the priority of multiple loads */
31468 load_store_pendulum
--;
31475 else if (load_store_pendulum
== -1)
	  /* A store has been issued in this cycle.  Scan the ready list for
	     another store to issue with it, preferring a store to an adjacent
	     memory location to facilitate store pairing in the store queue.  */
31480 int first_store_pos
= -1;
31486 if (is_store_insn (ready
[pos
], &str_mem
))
		  /* Maintain the index of the first store found on the
		     ready list.  */
31491 if (first_store_pos
== -1)
31492 first_store_pos
= pos
;
31494 if (is_store_insn (last_scheduled_insn
, &str_mem2
)
31495 && adjacent_mem_locations (str_mem
, str_mem2
))
		      /* Found an adjacent store.  Move it to the head of the
			 ready list, and adjust its priority so that it is
			 more likely to stay there.  */
31501 for (i
=pos
; i
<*pn_ready
-1; i
++)
31502 ready
[i
] = ready
[i
+ 1];
31503 ready
[*pn_ready
-1] = tmp
;
31505 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31506 INSN_PRIORITY (tmp
)++;
31508 first_store_pos
= -1;
31516 if (first_store_pos
>= 0)
	      /* An adjacent store wasn't found, but a non-adjacent store was,
		 so move the non-adjacent store to the front of the ready
		 list, and adjust its priority so that it is more likely to
		 stay there.  */
31522 tmp
= ready
[first_store_pos
];
31523 for (i
=first_store_pos
; i
<*pn_ready
-1; i
++)
31524 ready
[i
] = ready
[i
+ 1];
31525 ready
[*pn_ready
-1] = tmp
;
31526 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31527 INSN_PRIORITY (tmp
)++;
31530 else if (load_store_pendulum
== 2)
	  /* Two loads have been issued in this cycle.  Increase the priority
	     of the first store in the ready list to favor it for issuing in
	     the next cycle.  */
31539 if (is_store_insn (ready
[pos
], &str_mem
)
31541 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31543 INSN_PRIORITY (ready
[pos
])++;
31545 /* Adjust the pendulum to account for the fact that a store
31546 was found and increased in priority. This is to prevent
31547 increasing the priority of multiple stores */
31548 load_store_pendulum
++;
31557 /* Do Power9 dependent reordering if necessary. */
31558 if (rs6000_tune
== PROCESSOR_POWER9
&& last_scheduled_insn
31559 && recog_memoized (last_scheduled_insn
) >= 0)
31560 return power9_sched_reorder2 (ready
, *pn_ready
- 1);
31562 return cached_can_issue_more
;
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e., the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e., the dispatch group
   that precedes the group to which INSN belongs).  This means that INSN
   will be the first insn in the group it belongs to.  */
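/* A minimal sketch of the decision below (illustrative restatement; it
   assumes the insn_must_be_first_in_group / insn_must_be_last_in_group
   helpers defined later in this file):

     if (which_group == current_group)
       return insn_must_be_last_in_group (insn);
     else if (which_group == previous_group)
       return insn_must_be_first_in_group (insn);
     return false;
*/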
31579 insn_terminates_group_p (rtx_insn
*insn
, enum group_termination which_group
)
31586 first
= insn_must_be_first_in_group (insn
);
31587 last
= insn_must_be_last_in_group (insn
);
31592 if (which_group
== current_group
)
31594 else if (which_group
== previous_group
)
31602 insn_must_be_first_in_group (rtx_insn
*insn
)
31604 enum attr_type type
;
31608 || DEBUG_INSN_P (insn
)
31609 || GET_CODE (PATTERN (insn
)) == USE
31610 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31613 switch (rs6000_tune
)
31615 case PROCESSOR_POWER5
:
31616 if (is_cracked_insn (insn
))
31619 case PROCESSOR_POWER4
:
31620 if (is_microcoded_insn (insn
))
31623 if (!rs6000_sched_groups
)
31626 type
= get_attr_type (insn
);
31633 case TYPE_CR_LOGICAL
:
31646 case PROCESSOR_POWER6
:
31647 type
= get_attr_type (insn
);
31656 case TYPE_FPCOMPARE
:
31667 if (get_attr_dot (insn
) == DOT_NO
31668 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31673 if (get_attr_size (insn
) == SIZE_32
)
31681 if (get_attr_update (insn
) == UPDATE_YES
)
31689 case PROCESSOR_POWER7
:
31690 type
= get_attr_type (insn
);
31694 case TYPE_CR_LOGICAL
:
31708 if (get_attr_dot (insn
) == DOT_YES
)
31713 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31714 || get_attr_update (insn
) == UPDATE_YES
)
31721 if (get_attr_update (insn
) == UPDATE_YES
)
31729 case PROCESSOR_POWER8
:
31730 type
= get_attr_type (insn
);
31734 case TYPE_CR_LOGICAL
:
31742 case TYPE_VECSTORE
:
31749 if (get_attr_dot (insn
) == DOT_YES
)
31754 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31755 || get_attr_update (insn
) == UPDATE_YES
)
31760 if (get_attr_update (insn
) == UPDATE_YES
31761 && get_attr_indexed (insn
) == INDEXED_YES
)
31777 insn_must_be_last_in_group (rtx_insn
*insn
)
31779 enum attr_type type
;
31783 || DEBUG_INSN_P (insn
)
31784 || GET_CODE (PATTERN (insn
)) == USE
31785 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31788 switch (rs6000_tune
) {
31789 case PROCESSOR_POWER4
:
31790 case PROCESSOR_POWER5
:
31791 if (is_microcoded_insn (insn
))
31794 if (is_branch_slot_insn (insn
))
31798 case PROCESSOR_POWER6
:
31799 type
= get_attr_type (insn
);
31807 case TYPE_FPCOMPARE
:
31818 if (get_attr_dot (insn
) == DOT_NO
31819 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31824 if (get_attr_size (insn
) == SIZE_32
)
31832 case PROCESSOR_POWER7
:
31833 type
= get_attr_type (insn
);
31843 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31844 && get_attr_update (insn
) == UPDATE_YES
)
31849 if (get_attr_update (insn
) == UPDATE_YES
31850 && get_attr_indexed (insn
) == INDEXED_YES
)
31858 case PROCESSOR_POWER8
:
31859 type
= get_attr_type (insn
);
31871 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31872 && get_attr_update (insn
) == UPDATE_YES
)
31877 if (get_attr_update (insn
) == UPDATE_YES
31878 && get_attr_indexed (insn
) == INDEXED_YES
)
31893 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31894 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31897 is_costly_group (rtx
*group_insns
, rtx next_insn
)
31900 int issue_rate
= rs6000_issue_rate ();
31902 for (i
= 0; i
< issue_rate
; i
++)
31904 sd_iterator_def sd_it
;
31906 rtx insn
= group_insns
[i
];
31911 FOR_EACH_DEP (insn
, SD_LIST_RES_FORW
, sd_it
, dep
)
31913 rtx next
= DEP_CON (dep
);
31915 if (next
== next_insn
31916 && rs6000_is_costly_dependence (dep
, dep_cost (dep
), 0))
/* Utility function for redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert-sched-nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in
   the last group, and how many dispatch groups were encountered so far).  */
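/* A sketch of the two schemes above (illustrative only; the code below
   additionally uses the special group-ending nop on POWER6/7/8 and keeps
   the group bookkeeping up to date):

     if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
       while (can_issue_more-- > 0)		// exhaust the current group
	 emit_insn_before (gen_nop (), next_insn);
     else
       for (i = 0; i < rs6000_sched_insert_nops; i++)	// exactly X nops
	 emit_insn_before (gen_nop (), next_insn);
*/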
31938 force_new_group (int sched_verbose
, FILE *dump
, rtx
*group_insns
,
31939 rtx_insn
*next_insn
, bool *group_end
, int can_issue_more
,
31944 int issue_rate
= rs6000_issue_rate ();
31945 bool end
= *group_end
;
31948 if (next_insn
== NULL_RTX
|| DEBUG_INSN_P (next_insn
))
31949 return can_issue_more
;
31951 if (rs6000_sched_insert_nops
> sched_finish_regroup_exact
)
31952 return can_issue_more
;
31954 force
= is_costly_group (group_insns
, next_insn
);
31956 return can_issue_more
;
31958 if (sched_verbose
> 6)
31959 fprintf (dump
,"force: group count = %d, can_issue_more = %d\n",
31960 *group_count
,can_issue_more
);
31962 if (rs6000_sched_insert_nops
== sched_finish_regroup_exact
)
31965 can_issue_more
= 0;
31967 /* Since only a branch can be issued in the last issue_slot, it is
31968 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31969 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31970 in this case the last nop will start a new group and the branch
31971 will be forced to the new group. */
31972 if (can_issue_more
&& !is_branch_slot_insn (next_insn
))
31975 /* Do we have a special group ending nop? */
31976 if (rs6000_tune
== PROCESSOR_POWER6
|| rs6000_tune
== PROCESSOR_POWER7
31977 || rs6000_tune
== PROCESSOR_POWER8
)
31979 nop
= gen_group_ending_nop ();
31980 emit_insn_before (nop
, next_insn
);
31981 can_issue_more
= 0;
31984 while (can_issue_more
> 0)
31987 emit_insn_before (nop
, next_insn
);
31995 if (rs6000_sched_insert_nops
< sched_finish_regroup_exact
)
31997 int n_nops
= rs6000_sched_insert_nops
;
31999 /* Nops can't be issued from the branch slot, so the effective
32000 issue_rate for nops is 'issue_rate - 1'. */
32001 if (can_issue_more
== 0)
32002 can_issue_more
= issue_rate
;
32004 if (can_issue_more
== 0)
32006 can_issue_more
= issue_rate
- 1;
32009 for (i
= 0; i
< issue_rate
; i
++)
32011 group_insns
[i
] = 0;
32018 emit_insn_before (nop
, next_insn
);
32019 if (can_issue_more
== issue_rate
- 1) /* new group begins */
32022 if (can_issue_more
== 0)
32024 can_issue_more
= issue_rate
- 1;
32027 for (i
= 0; i
< issue_rate
; i
++)
32029 group_insns
[i
] = 0;
32035 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32038 /* Is next_insn going to start a new group? */
32041 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
32042 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
32043 || (can_issue_more
< issue_rate
&&
32044 insn_terminates_group_p (next_insn
, previous_group
)));
32045 if (*group_end
&& end
)
32048 if (sched_verbose
> 6)
32049 fprintf (dump
, "done force: group count = %d, can_issue_more = %d\n",
32050 *group_count
, can_issue_more
);
32051 return can_issue_more
;
32054 return can_issue_more
;
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups', which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops',
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows: It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following occurs:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue
     slots) can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */
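/* The "does the next insn start a new group?" rules above reduce to the
   following predicate, quoted here for reference (it appears verbatim in
   the loop below):

     group_end
       = (can_issue_more == 0
	  || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	  || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	  || (can_issue_more < issue_rate
	      && insn_terminates_group_p (next_insn, previous_group)));
*/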
32086 redefine_groups (FILE *dump
, int sched_verbose
, rtx_insn
*prev_head_insn
,
32089 rtx_insn
*insn
, *next_insn
;
32091 int can_issue_more
;
32094 int group_count
= 0;
32098 issue_rate
= rs6000_issue_rate ();
32099 group_insns
= XALLOCAVEC (rtx
, issue_rate
);
32100 for (i
= 0; i
< issue_rate
; i
++)
32102 group_insns
[i
] = 0;
32104 can_issue_more
= issue_rate
;
32106 insn
= get_next_active_insn (prev_head_insn
, tail
);
32109 while (insn
!= NULL_RTX
)
32111 slot
= (issue_rate
- can_issue_more
);
32112 group_insns
[slot
] = insn
;
32114 rs6000_variable_issue (dump
, sched_verbose
, insn
, can_issue_more
);
32115 if (insn_terminates_group_p (insn
, current_group
))
32116 can_issue_more
= 0;
32118 next_insn
= get_next_active_insn (insn
, tail
);
32119 if (next_insn
== NULL_RTX
)
32120 return group_count
+ 1;
32122 /* Is next_insn going to start a new group? */
32124 = (can_issue_more
== 0
32125 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
32126 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
32127 || (can_issue_more
< issue_rate
&&
32128 insn_terminates_group_p (next_insn
, previous_group
)));
32130 can_issue_more
= force_new_group (sched_verbose
, dump
, group_insns
,
32131 next_insn
, &group_end
, can_issue_more
,
32137 can_issue_more
= 0;
32138 for (i
= 0; i
< issue_rate
; i
++)
32140 group_insns
[i
] = 0;
32144 if (GET_MODE (next_insn
) == TImode
&& can_issue_more
)
32145 PUT_MODE (next_insn
, VOIDmode
);
32146 else if (!can_issue_more
&& GET_MODE (next_insn
) != TImode
)
32147 PUT_MODE (next_insn
, TImode
);
32150 if (can_issue_more
== 0)
32151 can_issue_more
= issue_rate
;
32154 return group_count
;
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */
32164 pad_groups (FILE *dump
, int sched_verbose
, rtx_insn
*prev_head_insn
,
32167 rtx_insn
*insn
, *next_insn
;
32170 int can_issue_more
;
32172 int group_count
= 0;
32174 /* Initialize issue_rate. */
32175 issue_rate
= rs6000_issue_rate ();
32176 can_issue_more
= issue_rate
;
32178 insn
= get_next_active_insn (prev_head_insn
, tail
);
32179 next_insn
= get_next_active_insn (insn
, tail
);
32181 while (insn
!= NULL_RTX
)
32184 rs6000_variable_issue (dump
, sched_verbose
, insn
, can_issue_more
);
32186 group_end
= (next_insn
== NULL_RTX
|| GET_MODE (next_insn
) == TImode
);
32188 if (next_insn
== NULL_RTX
)
      /* If the scheduler had marked group termination at this location
	 (between insn and next_insn), and neither insn nor next_insn will
	 force group termination, pad the group with nops to force group
	 termination.  */
32198 && (rs6000_sched_insert_nops
== sched_finish_pad_groups
)
32199 && !insn_terminates_group_p (insn
, current_group
)
32200 && !insn_terminates_group_p (next_insn
, previous_group
))
32202 if (!is_branch_slot_insn (next_insn
))
32205 while (can_issue_more
)
32208 emit_insn_before (nop
, next_insn
);
32213 can_issue_more
= issue_rate
;
32218 next_insn
= get_next_active_insn (insn
, tail
);
32221 return group_count
;
32224 /* We're beginning a new block. Initialize data structures as necessary. */
32227 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED
,
32228 int sched_verbose ATTRIBUTE_UNUSED
,
32229 int max_ready ATTRIBUTE_UNUSED
)
32231 last_scheduled_insn
= NULL
;
32232 load_store_pendulum
= 0;
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops to enforce insn group bundling.  */
32241 rs6000_sched_finish (FILE *dump
, int sched_verbose
)
32246 fprintf (dump
, "=== Finishing schedule.\n");
32248 if (reload_completed
&& rs6000_sched_groups
)
      /* Do not run the sched_finish hook when selective scheduling is
	 enabled.  */
32251 if (sel_sched_p ())
32254 if (rs6000_sched_insert_nops
== sched_finish_none
)
32257 if (rs6000_sched_insert_nops
== sched_finish_pad_groups
)
32258 n_groups
= pad_groups (dump
, sched_verbose
,
32259 current_sched_info
->prev_head
,
32260 current_sched_info
->next_tail
);
32262 n_groups
= redefine_groups (dump
, sched_verbose
,
32263 current_sched_info
->prev_head
,
32264 current_sched_info
->next_tail
);
32266 if (sched_verbose
>= 6)
32268 fprintf (dump
, "ngroups = %d\n", n_groups
);
32269 print_rtl (dump
, current_sched_info
->prev_head
);
32270 fprintf (dump
, "Done finish_sched\n");
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;
32287 /* Allocate store for new scheduling context. */
32289 rs6000_alloc_sched_context (void)
32291 return xmalloc (sizeof (rs6000_sched_context_def
));
/* If CLEAN_P is true, initialize _SC with clean data;
   otherwise initialize it from the global context.  */
32297 rs6000_init_sched_context (void *_sc
, bool clean_p
)
32299 rs6000_sched_context_t sc
= (rs6000_sched_context_t
) _sc
;
32303 sc
->cached_can_issue_more
= 0;
32304 sc
->last_scheduled_insn
= NULL
;
32305 sc
->load_store_pendulum
= 0;
32306 sc
->divide_cnt
= 0;
32307 sc
->vec_pairing
= 0;
32311 sc
->cached_can_issue_more
= cached_can_issue_more
;
32312 sc
->last_scheduled_insn
= last_scheduled_insn
;
32313 sc
->load_store_pendulum
= load_store_pendulum
;
32314 sc
->divide_cnt
= divide_cnt
;
32315 sc
->vec_pairing
= vec_pairing
;
32319 /* Sets the global scheduling context to the one pointed to by _SC. */
32321 rs6000_set_sched_context (void *_sc
)
32323 rs6000_sched_context_t sc
= (rs6000_sched_context_t
) _sc
;
32325 gcc_assert (sc
!= NULL
);
32327 cached_can_issue_more
= sc
->cached_can_issue_more
;
32328 last_scheduled_insn
= sc
->last_scheduled_insn
;
32329 load_store_pendulum
= sc
->load_store_pendulum
;
32330 divide_cnt
= sc
->divide_cnt
;
32331 vec_pairing
= sc
->vec_pairing
;
32336 rs6000_free_sched_context (void *_sc
)
32338 gcc_assert (_sc
!= NULL
);
32344 rs6000_sched_can_speculate_insn (rtx_insn
*insn
)
32346 switch (get_attr_type (insn
))
32361 /* Length in units of the trampoline for entering a nested function. */
32364 rs6000_trampoline_size (void)
32368 switch (DEFAULT_ABI
)
32371 gcc_unreachable ();
32374 ret
= (TARGET_32BIT
) ? 12 : 24;
32378 gcc_assert (!TARGET_32BIT
);
32384 ret
= (TARGET_32BIT
) ? 40 : 48;
32391 /* Emit RTL insns to initialize the variable parts of a trampoline.
32392 FNADDR is an RTX for the address of the function's pure code.
32393 CXT is an RTX for the static chain value for the function. */
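/* For the AIX ABI, the descriptor written below has the following layout,
   where regsize is 4 for -m32 and 8 for -m64 (descriptive summary of the
   emit_move_insn calls that follow):

     offset 0 * regsize : address of the function's code
     offset 1 * regsize : TOC pointer for the function
     offset 2 * regsize : static chain value (CXT)  */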
32396 rs6000_trampoline_init (rtx m_tramp
, tree fndecl
, rtx cxt
)
32398 int regsize
= (TARGET_32BIT
) ? 4 : 8;
32399 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
32400 rtx ctx_reg
= force_reg (Pmode
, cxt
);
32401 rtx addr
= force_reg (Pmode
, XEXP (m_tramp
, 0));
32403 switch (DEFAULT_ABI
)
32406 gcc_unreachable ();
32408 /* Under AIX, just build the 3 word function descriptor */
32411 rtx fnmem
, fn_reg
, toc_reg
;
32413 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS
)
32414 error ("you cannot take the address of a nested function if you use "
32415 "the %qs option", "-mno-pointers-to-nested-functions");
32417 fnmem
= gen_const_mem (Pmode
, force_reg (Pmode
, fnaddr
));
32418 fn_reg
= gen_reg_rtx (Pmode
);
32419 toc_reg
= gen_reg_rtx (Pmode
);
32421 /* Macro to shorten the code expansions below. */
32422 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32424 m_tramp
= replace_equiv_address (m_tramp
, addr
);
32426 emit_move_insn (fn_reg
, MEM_PLUS (fnmem
, 0));
32427 emit_move_insn (toc_reg
, MEM_PLUS (fnmem
, regsize
));
32428 emit_move_insn (MEM_PLUS (m_tramp
, 0), fn_reg
);
32429 emit_move_insn (MEM_PLUS (m_tramp
, regsize
), toc_reg
);
32430 emit_move_insn (MEM_PLUS (m_tramp
, 2*regsize
), ctx_reg
);
32436 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32440 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, "__trampoline_setup"),
32441 LCT_NORMAL
, VOIDmode
,
32443 GEN_INT (rs6000_trampoline_size ()), SImode
,
32451 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32452 identifier as an argument, so the front end shouldn't look it up. */
32455 rs6000_attribute_takes_identifier_p (const_tree attr_id
)
32457 return is_attribute_p ("altivec", attr_id
);
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

     __attribute__((altivec(vector__)))
     __attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
     __attribute__((altivec(bool__)))	(always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
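/* Illustrative examples of the attribute forms above (hand-written
   equivalents of the 'vector', 'pixel' and 'bool' keywords; normal user
   code spells these via the keywords instead):

     __attribute__ ((altivec (vector__))) int vi;		// vector int
     __attribute__ ((altivec (pixel__))) unsigned short px;	// vector pixel
     __attribute__ ((altivec (bool__))) unsigned int bi;	// vector bool int
*/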
32471 rs6000_handle_altivec_attribute (tree
*node
,
32472 tree name ATTRIBUTE_UNUSED
,
32474 int flags ATTRIBUTE_UNUSED
,
32475 bool *no_add_attrs
)
32477 tree type
= *node
, result
= NULL_TREE
;
32481 = ((args
&& TREE_CODE (args
) == TREE_LIST
&& TREE_VALUE (args
)
32482 && TREE_CODE (TREE_VALUE (args
)) == IDENTIFIER_NODE
)
32483 ? *IDENTIFIER_POINTER (TREE_VALUE (args
))
32486 while (POINTER_TYPE_P (type
)
32487 || TREE_CODE (type
) == FUNCTION_TYPE
32488 || TREE_CODE (type
) == METHOD_TYPE
32489 || TREE_CODE (type
) == ARRAY_TYPE
)
32490 type
= TREE_TYPE (type
);
32492 mode
= TYPE_MODE (type
);
32494 /* Check for invalid AltiVec type qualifiers. */
32495 if (type
== long_double_type_node
)
32496 error ("use of %<long double%> in AltiVec types is invalid");
32497 else if (type
== boolean_type_node
)
32498 error ("use of boolean types in AltiVec types is invalid");
32499 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
32500 error ("use of %<complex%> in AltiVec types is invalid");
32501 else if (DECIMAL_FLOAT_MODE_P (mode
))
32502 error ("use of decimal floating point types in AltiVec types is invalid");
32503 else if (!TARGET_VSX
)
32505 if (type
== long_unsigned_type_node
|| type
== long_integer_type_node
)
32508 error ("use of %<long%> in AltiVec types is invalid for "
32509 "64-bit code without %qs", "-mvsx");
32510 else if (rs6000_warn_altivec_long
)
32511 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32514 else if (type
== long_long_unsigned_type_node
32515 || type
== long_long_integer_type_node
)
32516 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32518 else if (type
== double_type_node
)
32519 error ("use of %<double%> in AltiVec types is invalid without %qs",
32523 switch (altivec_type
)
32526 unsigned_p
= TYPE_UNSIGNED (type
);
32530 result
= (unsigned_p
? unsigned_V1TI_type_node
: V1TI_type_node
);
32533 result
= (unsigned_p
? unsigned_V2DI_type_node
: V2DI_type_node
);
32536 result
= (unsigned_p
? unsigned_V4SI_type_node
: V4SI_type_node
);
32539 result
= (unsigned_p
? unsigned_V8HI_type_node
: V8HI_type_node
);
32542 result
= (unsigned_p
? unsigned_V16QI_type_node
: V16QI_type_node
);
32544 case E_SFmode
: result
= V4SF_type_node
; break;
32545 case E_DFmode
: result
= V2DF_type_node
; break;
32546 /* If the user says 'vector int bool', we may be handed the 'bool'
32547 attribute _before_ the 'vector' attribute, and so select the
32548 proper type in the 'b' case below. */
32549 case E_V4SImode
: case E_V8HImode
: case E_V16QImode
: case E_V4SFmode
:
32550 case E_V2DImode
: case E_V2DFmode
:
32558 case E_DImode
: case E_V2DImode
: result
= bool_V2DI_type_node
; break;
32559 case E_SImode
: case E_V4SImode
: result
= bool_V4SI_type_node
; break;
32560 case E_HImode
: case E_V8HImode
: result
= bool_V8HI_type_node
; break;
32561 case E_QImode
: case E_V16QImode
: result
= bool_V16QI_type_node
;
32568 case E_V8HImode
: result
= pixel_V8HI_type_node
;
32574 /* Propagate qualifiers attached to the element type
32575 onto the vector type. */
32576 if (result
&& result
!= type
&& TYPE_QUALS (type
))
32577 result
= build_qualified_type (result
, TYPE_QUALS (type
));
32579 *no_add_attrs
= true; /* No need to hang on to the attribute. */
32582 *node
= lang_hooks
.types
.reconstruct_complex_type (*node
, result
);
32587 /* AltiVec defines five built-in scalar types that serve as vector
32588 elements; we must teach the compiler how to mangle them. The 128-bit
32589 floating point mangling is target-specific as well. */
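/* For example, the AltiVec 'bool int' element type is mangled as
   "U6__booli" and '__pixel' as "u7__pixel"; see the table below.  */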
32591 static const char *
32592 rs6000_mangle_type (const_tree type
)
32594 type
= TYPE_MAIN_VARIANT (type
);
32596 if (TREE_CODE (type
) != VOID_TYPE
&& TREE_CODE (type
) != BOOLEAN_TYPE
32597 && TREE_CODE (type
) != INTEGER_TYPE
&& TREE_CODE (type
) != REAL_TYPE
)
32600 if (type
== bool_char_type_node
) return "U6__boolc";
32601 if (type
== bool_short_type_node
) return "U6__bools";
32602 if (type
== pixel_type_node
) return "u7__pixel";
32603 if (type
== bool_int_type_node
) return "U6__booli";
32604 if (type
== bool_long_long_type_node
) return "U6__boolx";
32606 if (SCALAR_FLOAT_TYPE_P (type
) && FLOAT128_IBM_P (TYPE_MODE (type
)))
32608 if (SCALAR_FLOAT_TYPE_P (type
) && FLOAT128_IEEE_P (TYPE_MODE (type
)))
32609 return ieee128_mangling_gcc_8_1
? "U10__float128" : "u9__ieee128";
32611 /* For all other types, use the default mangling. */
32615 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32616 struct attribute_spec.handler. */
32619 rs6000_handle_longcall_attribute (tree
*node
, tree name
,
32620 tree args ATTRIBUTE_UNUSED
,
32621 int flags ATTRIBUTE_UNUSED
,
32622 bool *no_add_attrs
)
32624 if (TREE_CODE (*node
) != FUNCTION_TYPE
32625 && TREE_CODE (*node
) != FIELD_DECL
32626 && TREE_CODE (*node
) != TYPE_DECL
)
32628 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
32630 *no_add_attrs
= true;
32636 /* Set longcall attributes on all functions declared when
32637 rs6000_default_long_calls is true. */
32639 rs6000_set_default_type_attributes (tree type
)
32641 if (rs6000_default_long_calls
32642 && (TREE_CODE (type
) == FUNCTION_TYPE
32643 || TREE_CODE (type
) == METHOD_TYPE
))
32644 TYPE_ATTRIBUTES (type
) = tree_cons (get_identifier ("longcall"),
32646 TYPE_ATTRIBUTES (type
));
32649 darwin_set_default_type_attributes (type
);
32653 /* Return a reference suitable for calling a function with the
32654 longcall attribute. */
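/* Illustrative user-level example of the attribute involved (assuming a
   declaration in user code, not in this file):

     void far_away (void) __attribute__ ((longcall));

   Calls to such a function go through a register, so they are not limited
   to the +/- 32MB reach of a direct branch.  */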
32657 rs6000_longcall_ref (rtx call_ref
, rtx arg
)
32659 /* System V adds '.' to the internal name, so skip them. */
32660 const char *call_name
= XSTR (call_ref
, 0);
32661 if (*call_name
== '.')
32663 while (*call_name
== '.')
32666 tree node
= get_identifier (call_name
);
32667 call_ref
= gen_rtx_SYMBOL_REF (VOIDmode
, IDENTIFIER_POINTER (node
));
32671 && TARGET_TLS_MARKERS
32672 && (DEFAULT_ABI
== ABI_ELFv2
|| DEFAULT_ABI
== ABI_V4
))
32674 rtx base
= const0_rtx
;
32676 if (DEFAULT_ABI
== ABI_ELFv2
)
32678 base
= gen_rtx_REG (Pmode
, TOC_REGISTER
);
32684 base
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
32687 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32688 may be used by a function global entry point. For SysV4, r11
32689 is used by __glink_PLTresolve lazy resolver entry. */
32690 rtx reg
= gen_rtx_REG (Pmode
, regno
);
32691 rtx hi
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (3, base
, call_ref
, arg
),
32693 rtx lo
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (3, reg
, call_ref
, arg
),
32695 emit_insn (gen_rtx_SET (reg
, hi
));
32696 emit_insn (gen_rtx_SET (reg
, lo
));
32700 return force_reg (Pmode
, call_ref
);
32703 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32704 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32707 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32708 struct attribute_spec.handler. */
32710 rs6000_handle_struct_attribute (tree
*node
, tree name
,
32711 tree args ATTRIBUTE_UNUSED
,
32712 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
32715 if (DECL_P (*node
))
32717 if (TREE_CODE (*node
) == TYPE_DECL
)
32718 type
= &TREE_TYPE (*node
);
32723 if (!(type
&& (TREE_CODE (*type
) == RECORD_TYPE
32724 || TREE_CODE (*type
) == UNION_TYPE
)))
32726 warning (OPT_Wattributes
, "%qE attribute ignored", name
);
32727 *no_add_attrs
= true;
32730 else if ((is_attribute_p ("ms_struct", name
)
32731 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type
)))
32732 || ((is_attribute_p ("gcc_struct", name
)
32733 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type
)))))
32735 warning (OPT_Wattributes
, "%qE incompatible attribute ignored",
32737 *no_add_attrs
= true;
32744 rs6000_ms_bitfield_layout_p (const_tree record_type
)
32746 return (TARGET_USE_MS_BITFIELD_LAYOUT
&&
32747 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type
)))
32748 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type
));
32751 #ifdef USING_ELFOS_H
32753 /* A get_unnamed_section callback, used for switching to toc_section. */
32756 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
32758 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
32759 && TARGET_MINIMAL_TOC
)
32761 if (!toc_initialized
)
32763 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
32764 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32765 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "LCTOC", 0);
32766 fprintf (asm_out_file
, "\t.tc ");
32767 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1[TC],");
32768 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32769 fprintf (asm_out_file
, "\n");
32771 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32772 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32773 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32774 fprintf (asm_out_file
, " = .+32768\n");
32775 toc_initialized
= 1;
32778 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32780 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
32782 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
32783 if (!toc_initialized
)
32785 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32786 toc_initialized
= 1;
32791 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32792 if (!toc_initialized
)
32794 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32795 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32796 fprintf (asm_out_file
, " = .+32768\n");
32797 toc_initialized
= 1;
32802 /* Implement TARGET_ASM_INIT_SECTIONS. */
32805 rs6000_elf_asm_init_sections (void)
32808 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op
, NULL
);
32811 = get_unnamed_section (SECTION_WRITE
, output_section_asm_op
,
32812 SDATA2_SECTION_ASM_OP
);
32815 /* Implement TARGET_SELECT_RTX_SECTION. */
32818 rs6000_elf_select_rtx_section (machine_mode mode
, rtx x
,
32819 unsigned HOST_WIDE_INT align
)
32821 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
32822 return toc_section
;
32824 return default_elf_select_rtx_section (mode
, x
, align
);
32827 /* For a SYMBOL_REF, set generic flags and then perform some
32828 target-specific processing.
32830 When the AIX ABI is requested on a non-AIX system, replace the
32831 function name with the real name (with a leading .) rather than the
32832 function descriptor name. This saves a lot of overriding code to
32833 read the prefixes. */
32835 static void rs6000_elf_encode_section_info (tree
, rtx
, int) ATTRIBUTE_UNUSED
;
32837 rs6000_elf_encode_section_info (tree decl
, rtx rtl
, int first
)
32839 default_encode_section_info (decl
, rtl
, first
);
32842 && TREE_CODE (decl
) == FUNCTION_DECL
32844 && DEFAULT_ABI
== ABI_AIX
)
32846 rtx sym_ref
= XEXP (rtl
, 0);
32847 size_t len
= strlen (XSTR (sym_ref
, 0));
32848 char *str
= XALLOCAVEC (char, len
+ 2);
32850 memcpy (str
+ 1, XSTR (sym_ref
, 0), len
+ 1);
32851 XSTR (sym_ref
, 0) = ggc_alloc_string (str
, len
+ 1);
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}
32866 rs6000_elf_in_small_data_p (const_tree decl
)
32868 if (rs6000_sdata
== SDATA_NONE
)
32871 /* We want to merge strings, so we never consider them small data. */
32872 if (TREE_CODE (decl
) == STRING_CST
)
32875 /* Functions are never in the small data area. */
32876 if (TREE_CODE (decl
) == FUNCTION_DECL
)
32879 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_SECTION_NAME (decl
))
32881 const char *section
= DECL_SECTION_NAME (decl
);
32882 if (compare_section_name (section
, ".sdata")
32883 || compare_section_name (section
, ".sdata2")
32884 || compare_section_name (section
, ".gnu.linkonce.s")
32885 || compare_section_name (section
, ".sbss")
32886 || compare_section_name (section
, ".sbss2")
32887 || compare_section_name (section
, ".gnu.linkonce.sb")
32888 || strcmp (section
, ".PPC.EMB.sdata0") == 0
32889 || strcmp (section
, ".PPC.EMB.sbss0") == 0)
32894 /* If we are told not to put readonly data in sdata, then don't. */
32895 if (TREE_READONLY (decl
) && rs6000_sdata
!= SDATA_EABI
32896 && !rs6000_readonly_in_sdata
)
32899 HOST_WIDE_INT size
= int_size_in_bytes (TREE_TYPE (decl
));
32902 && size
<= g_switch_value
32903 /* If it's not public, and we're not going to reference it there,
32904 there's no need to put it in the small data section. */
32905 && (rs6000_sdata
!= SDATA_DATA
|| TREE_PUBLIC (decl
)))
32912 #endif /* USING_ELFOS_H */
32914 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32917 rs6000_use_blocks_for_constant_p (machine_mode mode
, const_rtx x
)
32919 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
);
/* Do not place thread-local symbol refs in the object blocks.  */
32925 rs6000_use_blocks_for_decl_p (const_tree decl
)
32927 return !DECL_THREAD_LOCAL_P (decl
);
32930 /* Return a REG that occurs in ADDR with coefficient 1.
32931 ADDR can be effectively incremented by incrementing REG.
32933 r0 is special and we must not select it as an address
32934 register by this routine since our caller will try to
32935 increment the returned register via an "la" instruction. */
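/* Worked example (illustrative): for ADDR == (plus (reg r9) (const_int 8))
   the loop below drops the constant operand and returns (reg r9); for
   (plus (reg r0) (reg r9)) it must pick r9, because r0 reads as zero when
   used as the base of an "la"/"addi".  */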
32938 find_addr_reg (rtx addr
)
32940 while (GET_CODE (addr
) == PLUS
)
32942 if (GET_CODE (XEXP (addr
, 0)) == REG
32943 && REGNO (XEXP (addr
, 0)) != 0)
32944 addr
= XEXP (addr
, 0);
32945 else if (GET_CODE (XEXP (addr
, 1)) == REG
32946 && REGNO (XEXP (addr
, 1)) != 0)
32947 addr
= XEXP (addr
, 1);
32948 else if (CONSTANT_P (XEXP (addr
, 0)))
32949 addr
= XEXP (addr
, 1);
32950 else if (CONSTANT_P (XEXP (addr
, 1)))
32951 addr
= XEXP (addr
, 0);
32953 gcc_unreachable ();
32955 gcc_assert (GET_CODE (addr
) == REG
&& REGNO (addr
) != 0);
32960 rs6000_fatal_bad_address (rtx op
)
32962 fatal_insn ("bad address", op
);
32967 typedef struct branch_island_d
{
32968 tree function_name
;
32974 static vec
<branch_island
, va_gc
> *branch_islands
;
/* Remember to generate a branch island for far calls to the given
   function.  */
32980 add_compiler_branch_island (tree label_name
, tree function_name
,
32983 branch_island bi
= {function_name
, label_name
, line_number
};
32984 vec_safe_push (branch_islands
, bi
);
32987 /* Generate far-jump branch islands for everything recorded in
32988 branch_islands. Invoked immediately after the last instruction of
32989 the epilogue has been emitted; the branch islands must be appended
32990 to, and contiguous with, the function body. Mach-O stubs are
32991 generated in machopic_output_stub(). */
32994 macho_branch_islands (void)
32998 while (!vec_safe_is_empty (branch_islands
))
33000 branch_island
*bi
= &branch_islands
->last ();
33001 const char *label
= IDENTIFIER_POINTER (bi
->label_name
);
33002 const char *name
= IDENTIFIER_POINTER (bi
->function_name
);
33003 char name_buf
[512];
33004 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33005 if (name
[0] == '*' || name
[0] == '&')
33006 strcpy (name_buf
, name
+1);
33010 strcpy (name_buf
+1, name
);
33012 strcpy (tmp_buf
, "\n");
33013 strcat (tmp_buf
, label
);
33014 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33015 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33016 dbxout_stabd (N_SLINE
, bi
->line_number
);
33017 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33020 if (TARGET_LINK_STACK
)
33023 get_ppc476_thunk_name (name
);
33024 strcat (tmp_buf
, ":\n\tmflr r0\n\tbl ");
33025 strcat (tmp_buf
, name
);
33026 strcat (tmp_buf
, "\n");
33027 strcat (tmp_buf
, label
);
33028 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
33032 strcat (tmp_buf
, ":\n\tmflr r0\n\tbcl 20,31,");
33033 strcat (tmp_buf
, label
);
33034 strcat (tmp_buf
, "_pic\n");
33035 strcat (tmp_buf
, label
);
33036 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
33039 strcat (tmp_buf
, "\taddis r11,r11,ha16(");
33040 strcat (tmp_buf
, name_buf
);
33041 strcat (tmp_buf
, " - ");
33042 strcat (tmp_buf
, label
);
33043 strcat (tmp_buf
, "_pic)\n");
33045 strcat (tmp_buf
, "\tmtlr r0\n");
33047 strcat (tmp_buf
, "\taddi r12,r11,lo16(");
33048 strcat (tmp_buf
, name_buf
);
33049 strcat (tmp_buf
, " - ");
33050 strcat (tmp_buf
, label
);
33051 strcat (tmp_buf
, "_pic)\n");
33053 strcat (tmp_buf
, "\tmtctr r12\n\tbctr\n");
33057 strcat (tmp_buf
, ":\nlis r12,hi16(");
33058 strcat (tmp_buf
, name_buf
);
33059 strcat (tmp_buf
, ")\n\tori r12,r12,lo16(");
33060 strcat (tmp_buf
, name_buf
);
33061 strcat (tmp_buf
, ")\n\tmtctr r12\n\tbctr");
33063 output_asm_insn (tmp_buf
, 0);
33064 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33065 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33066 dbxout_stabd (N_SLINE
, bi
->line_number
);
33067 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33068 branch_islands
->pop ();
33072 /* NO_PREVIOUS_DEF checks in the link list whether the function name is
33073 already there or not. */
33076 no_previous_def (tree function_name
)
33081 FOR_EACH_VEC_SAFE_ELT (branch_islands
, ix
, bi
)
33082 if (function_name
== bi
->function_name
)
/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */
33091 get_prev_label (tree function_name
)
33096 FOR_EACH_VEC_SAFE_ELT (branch_islands
, ix
, bi
)
33097 if (function_name
== bi
->function_name
)
33098 return bi
->label_name
;
33102 /* INSN is either a function call or a millicode call. It may have an
33103 unconditional jump in its delay slot.
33105 CALL_DEST is the routine we are calling. */
33108 macho_call_template (rtx_insn
*insn
, rtx
*operands
, int dest_operand_number
,
33109 int cookie_operand_number
)
33111 static char buf
[256];
33112 if (darwin_emit_branch_islands
33113 && GET_CODE (operands
[dest_operand_number
]) == SYMBOL_REF
33114 && (INTVAL (operands
[cookie_operand_number
]) & CALL_LONG
))
33117 tree funname
= get_identifier (XSTR (operands
[dest_operand_number
], 0));
33119 if (no_previous_def (funname
))
33121 rtx label_rtx
= gen_label_rtx ();
33122 char *label_buf
, temp_buf
[256];
33123 ASM_GENERATE_INTERNAL_LABEL (temp_buf
, "L",
33124 CODE_LABEL_NUMBER (label_rtx
));
33125 label_buf
= temp_buf
[0] == '*' ? temp_buf
+ 1 : temp_buf
;
33126 labelname
= get_identifier (label_buf
);
33127 add_compiler_branch_island (labelname
, funname
, insn_line (insn
));
33130 labelname
= get_prev_label (funname
);
33132 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33133 instruction will reach 'foo', otherwise link as 'bl L42'".
33134 "L42" should be a 'branch island', that will do a far jump to
33135 'foo'. Branch islands are generated in
33136 macho_branch_islands(). */
33137 sprintf (buf
, "jbsr %%z%d,%.246s",
33138 dest_operand_number
, IDENTIFIER_POINTER (labelname
));
33141 sprintf (buf
, "bl %%z%d", dest_operand_number
);
33145 /* Generate PIC and indirect symbol stubs. */
33148 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
33150 unsigned int length
;
33151 char *symbol_name
, *lazy_ptr_name
;
33152 char *local_label_0
;
33153 static int label
= 0;
33155 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33156 symb
= (*targetm
.strip_name_encoding
) (symb
);
33159 length
= strlen (symb
);
33160 symbol_name
= XALLOCAVEC (char, length
+ 32);
33161 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
33163 lazy_ptr_name
= XALLOCAVEC (char, length
+ 32);
33164 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name
, symb
, length
);
33167 switch_to_section (darwin_sections
[machopic_picsymbol_stub1_section
]);
33169 switch_to_section (darwin_sections
[machopic_symbol_stub1_section
]);
33173 fprintf (file
, "\t.align 5\n");
33175 fprintf (file
, "%s:\n", stub
);
33176 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33179 local_label_0
= XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33180 sprintf (local_label_0
, "\"L%011d$spb\"", label
);
33182 fprintf (file
, "\tmflr r0\n");
33183 if (TARGET_LINK_STACK
)
33186 get_ppc476_thunk_name (name
);
33187 fprintf (file
, "\tbl %s\n", name
);
33188 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33192 fprintf (file
, "\tbcl 20,31,%s\n", local_label_0
);
33193 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33195 fprintf (file
, "\taddis r11,r11,ha16(%s-%s)\n",
33196 lazy_ptr_name
, local_label_0
);
33197 fprintf (file
, "\tmtlr r0\n");
33198 fprintf (file
, "\t%s r12,lo16(%s-%s)(r11)\n",
33199 (TARGET_64BIT
? "ldu" : "lwzu"),
33200 lazy_ptr_name
, local_label_0
);
33201 fprintf (file
, "\tmtctr r12\n");
33202 fprintf (file
, "\tbctr\n");
33206 fprintf (file
, "\t.align 4\n");
33208 fprintf (file
, "%s:\n", stub
);
33209 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33211 fprintf (file
, "\tlis r11,ha16(%s)\n", lazy_ptr_name
);
33212 fprintf (file
, "\t%s r12,lo16(%s)(r11)\n",
33213 (TARGET_64BIT
? "ldu" : "lwzu"),
33215 fprintf (file
, "\tmtctr r12\n");
33216 fprintf (file
, "\tbctr\n");
33219 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr_section
]);
33220 fprintf (file
, "%s:\n", lazy_ptr_name
);
33221 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33222 fprintf (file
, "%sdyld_stub_binding_helper\n",
33223 (TARGET_64BIT
? DOUBLE_INT_ASM_OP
: "\t.long\t"));
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if nonzero,
   otherwise we allocate register(s) as necessary.  */
33231 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
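/* SMALL_INT accepts exactly the signed 16-bit range: -0x8000 and 0x7fff
   pass (they fit a D-field displacement), while 0x8000 and -0x8001 do
   not.  */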
33234 rs6000_machopic_legitimize_pic_address (rtx orig
, machine_mode mode
,
33239 if (reg
== NULL
&& !reload_completed
)
33240 reg
= gen_reg_rtx (Pmode
);
33242 if (GET_CODE (orig
) == CONST
)
33246 if (GET_CODE (XEXP (orig
, 0)) == PLUS
33247 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
33250 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
33252 /* Use a different reg for the intermediate value, as
33253 it will be marked UNCHANGING. */
33254 reg_temp
= !can_create_pseudo_p () ? reg
: gen_reg_rtx (Pmode
);
33255 base
= rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
33258 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
33261 if (GET_CODE (offset
) == CONST_INT
)
33263 if (SMALL_INT (offset
))
33264 return plus_constant (Pmode
, base
, INTVAL (offset
));
33265 else if (!reload_completed
)
33266 offset
= force_reg (Pmode
, offset
);
33269 rtx mem
= force_const_mem (Pmode
, orig
);
33270 return machopic_legitimize_pic_address (mem
, Pmode
, reg
);
33273 return gen_rtx_PLUS (Pmode
, base
, offset
);
33276 /* Fall back on generic machopic code. */
33277 return machopic_legitimize_pic_address (orig
, mode
, reg
);
33280 /* Output a .machine directive for the Darwin assembler, and call
33281 the generic start_file routine. */
33284 rs6000_darwin_file_start (void)
33286 static const struct
33290 HOST_WIDE_INT if_set
;
33292 { "ppc64", "ppc64", MASK_64BIT
},
33293 { "970", "ppc970", MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
33294 { "power4", "ppc970", 0 },
33295 { "G5", "ppc970", 0 },
33296 { "7450", "ppc7450", 0 },
33297 { "7400", "ppc7400", MASK_ALTIVEC
},
33298 { "G4", "ppc7400", 0 },
33299 { "750", "ppc750", 0 },
33300 { "740", "ppc750", 0 },
33301 { "G3", "ppc750", 0 },
33302 { "604e", "ppc604e", 0 },
33303 { "604", "ppc604", 0 },
33304 { "603e", "ppc603", 0 },
33305 { "603", "ppc603", 0 },
33306 { "601", "ppc601", 0 },
33307 { NULL
, "ppc", 0 } };
33308 const char *cpu_id
= "";
33311 rs6000_file_start ();
33312 darwin_file_start ();
33314 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33316 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
33317 cpu_id
= rs6000_default_cpu
;
33319 if (global_options_set
.x_rs6000_cpu_index
)
33320 cpu_id
= processor_target_table
[rs6000_cpu_index
].name
;
33322 /* Look through the mapping array. Pick the first name that either
33323 matches the argument, has a bit set in IF_SET that is also set
33324 in the target flags, or has a NULL name. */
33327 while (mapping
[i
].arg
!= NULL
33328 && strcmp (mapping
[i
].arg
, cpu_id
) != 0
33329 && (mapping
[i
].if_set
& rs6000_isa_flags
) == 0)
33332 fprintf (asm_out_file
, "\t.machine %s\n", mapping
[i
].name
);
33335 #endif /* TARGET_MACHO */
33339 rs6000_elf_reloc_rw_mask (void)
33343 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33349 /* Record an element in the table of global constructors. SYMBOL is
33350 a SYMBOL_REF of the function to be called; PRIORITY is a number
33351 between 0 and MAX_INIT_PRIORITY.
33353 This differs from default_named_section_asm_out_constructor in
33354 that we have special handling for -mrelocatable. */
33356 static void rs6000_elf_asm_out_constructor (rtx
, int) ATTRIBUTE_UNUSED
;
33358 rs6000_elf_asm_out_constructor (rtx symbol
, int priority
)
33360 const char *section
= ".ctors";
33363 if (priority
!= DEFAULT_INIT_PRIORITY
)
33365 sprintf (buf
, ".ctors.%.5u",
33366 /* Invert the numbering so the linker puts us in the proper
33367 order; constructors are run from right to left, and the
33368 linker sorts in increasing order. */
33369 MAX_INIT_PRIORITY
- priority
);
33373 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33374 assemble_align (POINTER_SIZE
);
33376 if (DEFAULT_ABI
== ABI_V4
33377 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33379 fputs ("\t.long (", asm_out_file
);
33380 output_addr_const (asm_out_file
, symbol
);
33381 fputs (")@fixup\n", asm_out_file
);
33384 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33387 static void rs6000_elf_asm_out_destructor (rtx
, int) ATTRIBUTE_UNUSED
;
33389 rs6000_elf_asm_out_destructor (rtx symbol
, int priority
)
33391 const char *section
= ".dtors";
33394 if (priority
!= DEFAULT_INIT_PRIORITY
)
33396 sprintf (buf
, ".dtors.%.5u",
33397 /* Invert the numbering so the linker puts us in the proper
33398 order; constructors are run from right to left, and the
33399 linker sorts in increasing order. */
33400 MAX_INIT_PRIORITY
- priority
);
33404 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33405 assemble_align (POINTER_SIZE
);
33407 if (DEFAULT_ABI
== ABI_V4
33408 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33410 fputs ("\t.long (", asm_out_file
);
33411 output_addr_const (asm_out_file
, symbol
);
33412 fputs (")@fixup\n", asm_out_file
);
33415 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33419 rs6000_elf_declare_function_name (FILE *file
, const char *name
, tree decl
)
33421 if (TARGET_64BIT
&& DEFAULT_ABI
!= ABI_ELFv2
)
33423 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file
);
33424 ASM_OUTPUT_LABEL (file
, name
);
33425 fputs (DOUBLE_INT_ASM_OP
, file
);
33426 rs6000_output_function_entry (file
, name
);
33427 fputs (",.TOC.@tocbase,0\n\t.previous\n", file
);
33430 fputs ("\t.size\t", file
);
33431 assemble_name (file
, name
);
33432 fputs (",24\n\t.type\t.", file
);
33433 assemble_name (file
, name
);
33434 fputs (",@function\n", file
);
33435 if (TREE_PUBLIC (decl
) && ! DECL_WEAK (decl
))
33437 fputs ("\t.globl\t.", file
);
33438 assemble_name (file
, name
);
33443 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33444 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33445 rs6000_output_function_entry (file
, name
);
33446 fputs (":\n", file
);
33451 if (DEFAULT_ABI
== ABI_V4
33452 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
33453 && !TARGET_SECURE_PLT
33454 && (!constant_pool_empty_p () || crtl
->profile
)
33455 && (uses_toc
= uses_TOC ()))
33460 switch_to_other_text_partition ();
33461 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33463 fprintf (file
, "\t.long ");
33464 assemble_name (file
, toc_label_name
);
33467 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33468 assemble_name (file
, buf
);
33471 switch_to_other_text_partition ();
33474 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33475 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33477 if (TARGET_CMODEL
== CMODEL_LARGE
&& rs6000_global_entry_point_needed_p ())
33481 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33483 fprintf (file
, "\t.quad .TOC.-");
33484 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33485 assemble_name (file
, buf
);
33489 if (DEFAULT_ABI
== ABI_AIX
)
33491 const char *desc_name
, *orig_name
;
33493 orig_name
= (*targetm
.strip_name_encoding
) (name
);
33494 desc_name
= orig_name
;
33495 while (*desc_name
== '.')
33498 if (TREE_PUBLIC (decl
))
33499 fprintf (file
, "\t.globl %s\n", desc_name
);
33501 fprintf (file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33502 fprintf (file
, "%s:\n", desc_name
);
33503 fprintf (file
, "\t.long %s\n", orig_name
);
33504 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file
);
33505 fputs ("\t.long 0\n", file
);
33506 fprintf (file
, "\t.previous\n");
33508 ASM_OUTPUT_LABEL (file
, name
);
33511 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED
;
33513 rs6000_elf_file_end (void)
33515 #ifdef HAVE_AS_GNU_ATTRIBUTE
33516 /* ??? The value emitted depends on options active at file end.
33517 Assume anyone using #pragma or attributes that might change
33518 options knows what they are doing. */
33519 if ((TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
33520 && rs6000_passes_float
)
33524 if (TARGET_HARD_FLOAT
)
33528 if (rs6000_passes_long_double
)
33530 if (!TARGET_LONG_DOUBLE_128
)
33532 else if (TARGET_IEEEQUAD
)
33537 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n", fp
);
33539 if (TARGET_32BIT
&& DEFAULT_ABI
== ABI_V4
)
33541 if (rs6000_passes_vector
)
33542 fprintf (asm_out_file
, "\t.gnu_attribute 8, %d\n",
33543 (TARGET_ALTIVEC_ABI
? 2 : 1));
33544 if (rs6000_returns_struct
)
33545 fprintf (asm_out_file
, "\t.gnu_attribute 12, %d\n",
33546 aix_struct_return
? 2 : 1);
33549 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33550 if (TARGET_32BIT
|| DEFAULT_ABI
== ABI_ELFv2
)
33551 file_end_indicate_exec_stack ();
33554 if (flag_split_stack
)
33555 file_end_indicate_split_stack ();
  /* We have expanded a CPU builtin, so we need to emit a reference to
     the special symbol that LIBC uses to declare that it supports the
     AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  */
33562 switch_to_section (data_section
);
33563 fprintf (asm_out_file
, "\t.align %u\n", TARGET_32BIT
? 2 : 3);
33564 fprintf (asm_out_file
, "\t%s %s\n",
33565 TARGET_32BIT
? ".long" : ".quad", tcb_verification_symbol
);
33572 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33573 #define HAVE_XCOFF_DWARF_EXTRAS 0
33576 static enum unwind_info_type
33577 rs6000_xcoff_debug_unwind_info (void)
33583 rs6000_xcoff_asm_output_anchor (rtx symbol
)
33587 sprintf (buffer
, "$ + " HOST_WIDE_INT_PRINT_DEC
,
33588 SYMBOL_REF_BLOCK_OFFSET (symbol
));
33589 fprintf (asm_out_file
, "%s", SET_ASM_OP
);
33590 RS6000_OUTPUT_BASENAME (asm_out_file
, XSTR (symbol
, 0));
33591 fprintf (asm_out_file
, ",");
33592 RS6000_OUTPUT_BASENAME (asm_out_file
, buffer
);
33593 fprintf (asm_out_file
, "\n");
33597 rs6000_xcoff_asm_globalize_label (FILE *stream
, const char *name
)
33599 fputs (GLOBAL_ASM_OP
, stream
);
33600 RS6000_OUTPUT_BASENAME (stream
, name
);
33601 putc ('\n', stream
);
33604 /* A get_unnamed_decl callback, used for read-only sections. PTR
33605 points to the section string variable. */
33608 rs6000_xcoff_output_readonly_section_asm_op (const void *directive
)
33610 fprintf (asm_out_file
, "\t.csect %s[RO],%s\n",
33611 *(const char *const *) directive
,
33612 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33615 /* Likewise for read-write sections. */
33618 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive
)
33620 fprintf (asm_out_file
, "\t.csect %s[RW],%s\n",
33621 *(const char *const *) directive
,
33622 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33626 rs6000_xcoff_output_tls_section_asm_op (const void *directive
)
33628 fprintf (asm_out_file
, "\t.csect %s[TL],%s\n",
33629 *(const char *const *) directive
,
33630 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
33633 /* A get_unnamed_section callback, used for switching to toc_section. */
33636 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
33638 if (TARGET_MINIMAL_TOC
)
33640 /* toc_section is always selected at least once from
33641 rs6000_xcoff_file_start, so this is guaranteed to
33642 always be defined once and only once in each file. */
33643 if (!toc_initialized
)
33645 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file
);
33646 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file
);
33647 toc_initialized
= 1;
33649 fprintf (asm_out_file
, "\t.csect toc_table[RW]%s\n",
33650 (TARGET_32BIT
? "" : ",3"));
33653 fputs ("\t.toc\n", asm_out_file
);
33656 /* Implement TARGET_ASM_INIT_SECTIONS. */
33659 rs6000_xcoff_asm_init_sections (void)
33661 read_only_data_section
33662 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33663 &xcoff_read_only_section_name
);
33665 private_data_section
33666 = get_unnamed_section (SECTION_WRITE
,
33667 rs6000_xcoff_output_readwrite_section_asm_op
,
33668 &xcoff_private_data_section_name
);
33671 = get_unnamed_section (SECTION_TLS
,
33672 rs6000_xcoff_output_tls_section_asm_op
,
33673 &xcoff_tls_data_section_name
);
33675 tls_private_data_section
33676 = get_unnamed_section (SECTION_TLS
,
33677 rs6000_xcoff_output_tls_section_asm_op
,
33678 &xcoff_private_data_section_name
);
33680 read_only_private_data_section
33681 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33682 &xcoff_private_data_section_name
);
33685 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op
, NULL
);
33687 readonly_data_section
= read_only_data_section
;
33691 rs6000_xcoff_reloc_rw_mask (void)
33697 rs6000_xcoff_asm_named_section (const char *name
, unsigned int flags
,
33698 tree decl ATTRIBUTE_UNUSED
)
33701 static const char * const suffix
[5] = { "PR", "RO", "RW", "TL", "XO" };
33703 if (flags
& SECTION_EXCLUDE
)
33705 else if (flags
& SECTION_DEBUG
)
33707 fprintf (asm_out_file
, "\t.dwsect %s\n", name
);
33710 else if (flags
& SECTION_CODE
)
33712 else if (flags
& SECTION_TLS
)
33714 else if (flags
& SECTION_WRITE
)
33719 fprintf (asm_out_file
, "\t.csect %s%s[%s],%u\n",
33720 (flags
& SECTION_CODE
) ? "." : "",
33721 name
, suffix
[smclass
], flags
& SECTION_ENTSIZE
);
33724 #define IN_NAMED_SECTION(DECL) \
33725 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33726 && DECL_SECTION_NAME (DECL) != NULL)
33729 rs6000_xcoff_select_section (tree decl
, int reloc
,
33730 unsigned HOST_WIDE_INT align
)
33732 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33734 if (align
> BIGGEST_ALIGNMENT
)
33736 resolve_unique_section (decl
, reloc
, true);
33737 if (IN_NAMED_SECTION (decl
))
33738 return get_named_section (decl
, NULL
, reloc
);
33741 if (decl_readonly_section (decl
, reloc
))
33743 if (TREE_PUBLIC (decl
))
33744 return read_only_data_section
;
33746 return read_only_private_data_section
;
33751 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
33753 if (TREE_PUBLIC (decl
))
33754 return tls_data_section
;
33755 else if (bss_initializer_p (decl
))
33757 /* Convert to COMMON to emit in BSS. */
33758 DECL_COMMON (decl
) = 1;
33759 return tls_comm_section
;
33762 return tls_private_data_section
;
33766 if (TREE_PUBLIC (decl
))
33767 return data_section
;
33769 return private_data_section
;
33774 rs6000_xcoff_unique_section (tree decl
, int reloc ATTRIBUTE_UNUSED
)
33778 /* Use select_section for private data and uninitialized data with
33779 alignment <= BIGGEST_ALIGNMENT. */
33780 if (!TREE_PUBLIC (decl
)
33781 || DECL_COMMON (decl
)
33782 || (DECL_INITIAL (decl
) == NULL_TREE
33783 && DECL_ALIGN (decl
) <= BIGGEST_ALIGNMENT
)
33784 || DECL_INITIAL (decl
) == error_mark_node
33785 || (flag_zero_initialized_in_bss
33786 && initializer_zerop (DECL_INITIAL (decl
))))
33789 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
33790 name
= (*targetm
.strip_name_encoding
) (name
);
33791 set_decl_section_name (decl
, name
);
33794 /* Select section for constant in constant pool.
33796 On RS/6000, all constants are in the private read-only data area.
33797 However, if this is being placed in the TOC it must be output as a
33801 rs6000_xcoff_select_rtx_section (machine_mode mode
, rtx x
,
33802 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED
)
33804 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
33805 return toc_section
;
33807 return read_only_private_data_section
;
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;

  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
			   main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
			   main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (toc_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
33897 struct declare_alias_data
33900 bool function_descriptor
;
33903 /* Declare alias N. A helper function for for_node_and_aliases. */
33906 rs6000_declare_alias (struct symtab_node
*n
, void *d
)
33908 struct declare_alias_data
*data
= (struct declare_alias_data
*)d
;
33909 /* Main symbol is output specially, because varasm machinery does part of
33910 the job for us - we do not need to declare .globl/lglobs and such. */
33911 if (!n
->alias
|| n
->weakref
)
33914 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n
->decl
)))
33917 /* Prevent assemble_alias from trying to use .set pseudo operation
33918 that does not behave as expected by the middle-end. */
33919 TREE_ASM_WRITTEN (n
->decl
) = true;
33921 const char *name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n
->decl
));
33922 char *buffer
= (char *) alloca (strlen (name
) + 2);
33924 int dollar_inside
= 0;
33926 strcpy (buffer
, name
);
33927 p
= strchr (buffer
, '$');
33931 p
= strchr (p
+ 1, '$');
33933 if (TREE_PUBLIC (n
->decl
))
33935 if (!RS6000_WEAK
|| !DECL_WEAK (n
->decl
))
33937 if (dollar_inside
) {
33938 if (data
->function_descriptor
)
33939 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33940 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33942 if (data
->function_descriptor
)
33944 fputs ("\t.globl .", data
->file
);
33945 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33946 putc ('\n', data
->file
);
33948 fputs ("\t.globl ", data
->file
);
33949 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33950 putc ('\n', data
->file
);
33952 #ifdef ASM_WEAKEN_DECL
33953 else if (DECL_WEAK (n
->decl
) && !data
->function_descriptor
)
33954 ASM_WEAKEN_DECL (data
->file
, n
->decl
, name
, NULL
);
33961 if (data
->function_descriptor
)
33962 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33963 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33965 if (data
->function_descriptor
)
33967 fputs ("\t.lglobl .", data
->file
);
33968 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33969 putc ('\n', data
->file
);
33971 fputs ("\t.lglobl ", data
->file
);
33972 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33973 putc ('\n', data
->file
);
33975 if (data
->function_descriptor
)
33976 fputs (".", data
->file
);
33977 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33978 fputs (":\n", data
->file
);
33983 #ifdef HAVE_GAS_HIDDEN
33984 /* Helper function to calculate visibility of a DECL
33985 and return the value as a const string. */
33987 static const char *
33988 rs6000_xcoff_visibility (tree decl
)
33990 static const char * const visibility_types
[] = {
33991 "", ",protected", ",hidden", ",internal"
33994 enum symbol_visibility vis
= DECL_VISIBILITY (decl
);
33995 return visibility_types
[vis
];
34000 /* This macro produces the initial definition of a function name.
34001 On the RS/6000, we need to place an extra '.' in the function name and
34002 output the function descriptor.
34003 Dollar signs are converted to underscores.
34005 The csect for the function will have already been created when
34006 text_section was selected. We do have to go back to that csect, however.
34008 The third and fourth parameters to the .function pseudo-op (16 and 044)
34009 are placeholders which no longer have any use.
34011 Because AIX assembler's .set command has unexpected semantics, we output
34012 all aliases as alternative labels in front of the definition. */
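/* Roughly, for a public non-weak function `foo' on 32-bit AIX, the code
   below emits something like

	.globl .foo
	.csect foo[DS]
   foo:
	.long .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   that is, a `foo' descriptor csect pointing at the `.foo' code entry
   point; the exact directives vary with visibility, weakness, debug
   options and any aliases.  */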
34015 rs6000_xcoff_declare_function_name (FILE *file
, const char *name
, tree decl
)
34017 char *buffer
= (char *) alloca (strlen (name
) + 1);
34019 int dollar_inside
= 0;
34020 struct declare_alias_data data
= {file
, false};
34022 strcpy (buffer
, name
);
34023 p
= strchr (buffer
, '$');
34027 p
= strchr (p
+ 1, '$');
34029 if (TREE_PUBLIC (decl
))
34031 if (!RS6000_WEAK
|| !DECL_WEAK (decl
))
34033 if (dollar_inside
) {
34034 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
34035 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
34037 fputs ("\t.globl .", file
);
34038 RS6000_OUTPUT_BASENAME (file
, buffer
);
34039 #ifdef HAVE_GAS_HIDDEN
34040 fputs (rs6000_xcoff_visibility (decl
), file
);
34047 if (dollar_inside
) {
34048 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
34049 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
34051 fputs ("\t.lglobl .", file
);
34052 RS6000_OUTPUT_BASENAME (file
, buffer
);
34055 fputs ("\t.csect ", file
);
34056 RS6000_OUTPUT_BASENAME (file
, buffer
);
34057 fputs (TARGET_32BIT
? "[DS]\n" : "[DS],3\n", file
);
34058 RS6000_OUTPUT_BASENAME (file
, buffer
);
34059 fputs (":\n", file
);
34060 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
34062 fputs (TARGET_32BIT
? "\t.long ." : "\t.llong .", file
);
34063 RS6000_OUTPUT_BASENAME (file
, buffer
);
34064 fputs (", TOC[tc0], 0\n", file
);
34066 switch_to_section (function_section (decl
));
34068 RS6000_OUTPUT_BASENAME (file
, buffer
);
34069 fputs (":\n", file
);
34070 data
.function_descriptor
= true;
34071 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
34073 if (!DECL_IGNORED_P (decl
))
34075 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
34076 xcoffout_declare_function (file
, decl
, buffer
);
34077 else if (write_symbols
== DWARF2_DEBUG
)
34079 name
= (*targetm
.strip_name_encoding
) (name
);
34080 fprintf (file
, "\t.function .%s,.%s,2,0\n", name
, name
);
34087 /* Output assembly language to globalize a symbol from a DECL,
34088 possibly with visibility. */
34091 rs6000_xcoff_asm_globalize_decl_name (FILE *stream
, tree decl
)
34093 const char *name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
34094 fputs (GLOBAL_ASM_OP
, stream
);
34095 RS6000_OUTPUT_BASENAME (stream
, name
);
34096 #ifdef HAVE_GAS_HIDDEN
34097 fputs (rs6000_xcoff_visibility (decl
), stream
);
34099 putc ('\n', stream
);
34102 /* Output assembly language to define a symbol as COMMON from a DECL,
34103 possibly with visibility. */
34106 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream
,
34107 tree decl ATTRIBUTE_UNUSED
,
34109 unsigned HOST_WIDE_INT size
,
34110 unsigned HOST_WIDE_INT align
)
34112 unsigned HOST_WIDE_INT align2
= 2;
34115 align2
= floor_log2 (align
/ BITS_PER_UNIT
);
34119 fputs (COMMON_ASM_OP
, stream
);
34120 RS6000_OUTPUT_BASENAME (stream
, name
);
34123 "," HOST_WIDE_INT_PRINT_UNSIGNED
"," HOST_WIDE_INT_PRINT_UNSIGNED
,
34126 #ifdef HAVE_GAS_HIDDEN
34128 fputs (rs6000_xcoff_visibility (decl
), stream
);
34130 putc ('\n', stream
);
34133 /* This macro produces the initial definition of a object (variable) name.
34134 Because AIX assembler's .set command has unexpected semantics, we output
34135 all aliases as alternative labels in front of the definition. */
34138 rs6000_xcoff_declare_object_name (FILE *file
, const char *name
, tree decl
)
34140 struct declare_alias_data data
= {file
, false};
34141 RS6000_OUTPUT_BASENAME (file
, name
);
34142 fputs (":\n", file
);
34143 symtab_node::get_create (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
/* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'.  */

static void
rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-$", file);
}

/* Output a symbol offset relative to the dbase for the current object.
   We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
   signed offsets.

   __gcc_unwind_dbase is embedded in all executables/libraries through
   libgcc/config/rs6000/crtdbase.S.  */

static void
rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-__gcc_unwind_dbase", file);
}
34174 rs6000_xcoff_encode_section_info (tree decl
, rtx rtl
, int first
)
34178 const char *symname
;
34180 default_encode_section_info (decl
, rtl
, first
);
34182 /* Careful not to prod global register variables. */
34185 symbol
= XEXP (rtl
, 0);
34186 if (GET_CODE (symbol
) != SYMBOL_REF
)
34189 flags
= SYMBOL_REF_FLAGS (symbol
);
34191 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
34192 flags
&= ~SYMBOL_FLAG_HAS_BLOCK_INFO
;
34194 SYMBOL_REF_FLAGS (symbol
) = flags
;
34196 /* Append mapping class to extern decls. */
34197 symname
= XSTR (symbol
, 0);
34198 if (decl
/* sync condition with assemble_external () */
34199 && DECL_P (decl
) && DECL_EXTERNAL (decl
) && TREE_PUBLIC (decl
)
34200 && ((TREE_CODE (decl
) == VAR_DECL
&& !DECL_THREAD_LOCAL_P (decl
))
34201 || TREE_CODE (decl
) == FUNCTION_DECL
)
34202 && symname
[strlen (symname
) - 1] != ']')
34204 char *newname
= (char *) alloca (strlen (symname
) + 5);
34205 strcpy (newname
, symname
);
34206 strcat (newname
, (TREE_CODE (decl
) == FUNCTION_DECL
34207 ? "[DS]" : "[UA]"));
34208 XSTR (symbol
, 0) = ggc_strdup (newname
);
34211 #endif /* HAVE_AS_TLS */
34212 #endif /* TARGET_XCOFF */
34215 rs6000_asm_weaken_decl (FILE *stream
, tree decl
,
34216 const char *name
, const char *val
)
34218 fputs ("\t.weak\t", stream
);
34219 RS6000_OUTPUT_BASENAME (stream
, name
);
34220 if (decl
&& TREE_CODE (decl
) == FUNCTION_DECL
34221 && DEFAULT_ABI
== ABI_AIX
&& DOT_SYMBOLS
)
34224 fputs ("[DS]", stream
);
34225 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34227 fputs (rs6000_xcoff_visibility (decl
), stream
);
34229 fputs ("\n\t.weak\t.", stream
);
34230 RS6000_OUTPUT_BASENAME (stream
, name
);
34232 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34234 fputs (rs6000_xcoff_visibility (decl
), stream
);
34236 fputc ('\n', stream
);
34239 #ifdef ASM_OUTPUT_DEF
34240 ASM_OUTPUT_DEF (stream
, name
, val
);
34242 if (decl
&& TREE_CODE (decl
) == FUNCTION_DECL
34243 && DEFAULT_ABI
== ABI_AIX
&& DOT_SYMBOLS
)
34245 fputs ("\t.set\t.", stream
);
34246 RS6000_OUTPUT_BASENAME (stream
, name
);
34247 fputs (",.", stream
);
34248 RS6000_OUTPUT_BASENAME (stream
, val
);
34249 fputc ('\n', stream
);
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
	 && get_attr_cannot_copy (insn);
}
34264 /* Compute a (partial) cost for rtx X. Return true if the complete
34265 cost has been computed, and false if subexpressions should be
34266 scanned. In either case, *TOTAL contains the cost result. */
34269 rs6000_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
34270 int opno ATTRIBUTE_UNUSED
, int *total
, bool speed
)
34272 int code
= GET_CODE (x
);
34276 /* On the RS/6000, if it is valid in the insn, it is free. */
34278 if (((outer_code
== SET
34279 || outer_code
== PLUS
34280 || outer_code
== MINUS
)
34281 && (satisfies_constraint_I (x
)
34282 || satisfies_constraint_L (x
)))
34283 || (outer_code
== AND
34284 && (satisfies_constraint_K (x
)
34286 ? satisfies_constraint_L (x
)
34287 : satisfies_constraint_J (x
))))
34288 || ((outer_code
== IOR
|| outer_code
== XOR
)
34289 && (satisfies_constraint_K (x
)
34291 ? satisfies_constraint_L (x
)
34292 : satisfies_constraint_J (x
))))
34293 || outer_code
== ASHIFT
34294 || outer_code
== ASHIFTRT
34295 || outer_code
== LSHIFTRT
34296 || outer_code
== ROTATE
34297 || outer_code
== ROTATERT
34298 || outer_code
== ZERO_EXTRACT
34299 || (outer_code
== MULT
34300 && satisfies_constraint_I (x
))
34301 || ((outer_code
== DIV
|| outer_code
== UDIV
34302 || outer_code
== MOD
|| outer_code
== UMOD
)
34303 && exact_log2 (INTVAL (x
)) >= 0)
34304 || (outer_code
== COMPARE
34305 && (satisfies_constraint_I (x
)
34306 || satisfies_constraint_K (x
)))
34307 || ((outer_code
== EQ
|| outer_code
== NE
)
34308 && (satisfies_constraint_I (x
)
34309 || satisfies_constraint_K (x
)
34311 ? satisfies_constraint_L (x
)
34312 : satisfies_constraint_J (x
))))
34313 || (outer_code
== GTU
34314 && satisfies_constraint_I (x
))
34315 || (outer_code
== LTU
34316 && satisfies_constraint_P (x
)))
34321 else if ((outer_code
== PLUS
34322 && reg_or_add_cint_operand (x
, VOIDmode
))
34323 || (outer_code
== MINUS
34324 && reg_or_sub_cint_operand (x
, VOIDmode
))
34325 || ((outer_code
== SET
34326 || outer_code
== IOR
34327 || outer_code
== XOR
)
34329 & ~ (unsigned HOST_WIDE_INT
) 0xffffffff) == 0))
34331 *total
= COSTS_N_INSNS (1);
34337 case CONST_WIDE_INT
:
34341 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34345 /* When optimizing for size, MEM should be slightly more expensive
34346 than generating address, e.g., (plus (reg) (const)).
34347 L1 cache latency is about two instructions. */
34348 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34349 if (rs6000_slow_unaligned_access (mode
, MEM_ALIGN (x
)))
34350 *total
+= COSTS_N_INSNS (100);
34359 if (FLOAT_MODE_P (mode
))
34360 *total
= rs6000_cost
->fp
;
34362 *total
= COSTS_N_INSNS (1);
34366 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34367 && satisfies_constraint_I (XEXP (x
, 1)))
34369 if (INTVAL (XEXP (x
, 1)) >= -256
34370 && INTVAL (XEXP (x
, 1)) <= 255)
34371 *total
= rs6000_cost
->mulsi_const9
;
34373 *total
= rs6000_cost
->mulsi_const
;
34375 else if (mode
== SFmode
)
34376 *total
= rs6000_cost
->fp
;
34377 else if (FLOAT_MODE_P (mode
))
34378 *total
= rs6000_cost
->dmul
;
34379 else if (mode
== DImode
)
34380 *total
= rs6000_cost
->muldi
;
34382 *total
= rs6000_cost
->mulsi
;
34386 if (mode
== SFmode
)
34387 *total
= rs6000_cost
->fp
;
34389 *total
= rs6000_cost
->dmul
;
34394 if (FLOAT_MODE_P (mode
))
34396 *total
= mode
== DFmode
? rs6000_cost
->ddiv
34397 : rs6000_cost
->sdiv
;
34404 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34405 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
34407 if (code
== DIV
|| code
== MOD
)
34409 *total
= COSTS_N_INSNS (2);
34412 *total
= COSTS_N_INSNS (1);
34416 if (GET_MODE (XEXP (x
, 1)) == DImode
)
34417 *total
= rs6000_cost
->divdi
;
34419 *total
= rs6000_cost
->divsi
;
34421 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34422 if (!TARGET_MODULO
&& (code
== MOD
|| code
== UMOD
))
34423 *total
+= COSTS_N_INSNS (2);
34427 *total
= COSTS_N_INSNS (TARGET_CTZ
? 1 : 4);
34431 *total
= COSTS_N_INSNS (4);
34435 *total
= COSTS_N_INSNS (TARGET_POPCNTD
? 1 : 6);
34439 *total
= COSTS_N_INSNS (TARGET_CMPB
? 2 : 6);
34443 if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
34446 *total
= COSTS_N_INSNS (1);
34450 if (CONST_INT_P (XEXP (x
, 1)))
34452 rtx left
= XEXP (x
, 0);
34453 rtx_code left_code
= GET_CODE (left
);
34455 /* rotate-and-mask: 1 insn. */
34456 if ((left_code
== ROTATE
34457 || left_code
== ASHIFT
34458 || left_code
== LSHIFTRT
)
34459 && rs6000_is_valid_shift_mask (XEXP (x
, 1), left
, mode
))
34461 *total
= rtx_cost (XEXP (left
, 0), mode
, left_code
, 0, speed
);
34462 if (!CONST_INT_P (XEXP (left
, 1)))
34463 *total
+= rtx_cost (XEXP (left
, 1), SImode
, left_code
, 1, speed
);
34464 *total
+= COSTS_N_INSNS (1);
34468 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34469 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
34470 if (rs6000_is_valid_and_mask (XEXP (x
, 1), mode
)
34471 || (val
& 0xffff) == val
34472 || (val
& 0xffff0000) == val
34473 || ((val
& 0xffff) == 0 && mode
== SImode
))
34475 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34476 *total
+= COSTS_N_INSNS (1);
34481 if (rs6000_is_valid_2insn_and (XEXP (x
, 1), mode
))
34483 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34484 *total
+= COSTS_N_INSNS (2);
34489 *total
= COSTS_N_INSNS (1);
34494 *total
= COSTS_N_INSNS (1);
34500 *total
= COSTS_N_INSNS (1);
34504 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34505 the sign extend and shift separately within the insn. */
34506 if (TARGET_EXTSWSLI
&& mode
== DImode
34507 && GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
34508 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == SImode
)
34519 /* Handle mul_highpart. */
34520 if (outer_code
== TRUNCATE
34521 && GET_CODE (XEXP (x
, 0)) == MULT
)
34523 if (mode
== DImode
)
34524 *total
= rs6000_cost
->muldi
;
34526 *total
= rs6000_cost
->mulsi
;
34529 else if (outer_code
== AND
)
34532 *total
= COSTS_N_INSNS (1);
34537 if (GET_CODE (XEXP (x
, 0)) == MEM
)
34540 *total
= COSTS_N_INSNS (1);
34546 if (!FLOAT_MODE_P (mode
))
34548 *total
= COSTS_N_INSNS (1);
34554 case UNSIGNED_FLOAT
:
34557 case FLOAT_TRUNCATE
:
34558 *total
= rs6000_cost
->fp
;
34562 if (mode
== DFmode
)
34563 *total
= rs6000_cost
->sfdf_convert
;
34565 *total
= rs6000_cost
->fp
;
34569 switch (XINT (x
, 1))
34572 *total
= rs6000_cost
->fp
;
34584 *total
= COSTS_N_INSNS (1);
34587 else if (FLOAT_MODE_P (mode
) && TARGET_PPC_GFXOPT
&& TARGET_HARD_FLOAT
)
34589 *total
= rs6000_cost
->fp
;
34598 /* Carry bit requires mode == Pmode.
34599 NEG or PLUS already counted so only add one. */
34601 && (outer_code
== NEG
|| outer_code
== PLUS
))
34603 *total
= COSTS_N_INSNS (1);
34611 if (outer_code
== SET
)
34613 if (XEXP (x
, 1) == const0_rtx
)
34615 *total
= COSTS_N_INSNS (2);
34620 *total
= COSTS_N_INSNS (3);
34625 if (outer_code
== COMPARE
)
34639 /* Debug form of r6000_rtx_costs that is selected if -mdebug=cost. */
34642 rs6000_debug_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
34643 int opno
, int *total
, bool speed
)
34645 bool ret
= rs6000_rtx_costs (x
, mode
, outer_code
, opno
, total
, speed
);
34648 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34649 "opno = %d, total = %d, speed = %s, x:\n",
34650 ret
? "complete" : "scan inner",
34651 GET_MODE_NAME (mode
),
34652 GET_RTX_NAME (outer_code
),
34655 speed
? "true" : "false");
34663 rs6000_insn_cost (rtx_insn
*insn
, bool speed
)
34665 if (recog_memoized (insn
) < 0)
34669 return get_attr_length (insn
);
34671 int cost
= get_attr_cost (insn
);
34675 int n
= get_attr_length (insn
) / 4;
34676 enum attr_type type
= get_attr_type (insn
);
34683 cost
= COSTS_N_INSNS (n
+ 1);
34687 switch (get_attr_size (insn
))
34690 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->mulsi_const9
;
34693 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->mulsi_const
;
34696 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->mulsi
;
34699 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->muldi
;
34702 gcc_unreachable ();
34706 switch (get_attr_size (insn
))
34709 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->divsi
;
34712 cost
= COSTS_N_INSNS (n
- 1) + rs6000_cost
->divdi
;
34715 gcc_unreachable ();
34720 cost
= n
* rs6000_cost
->fp
;
34723 cost
= n
* rs6000_cost
->dmul
;
34726 cost
= n
* rs6000_cost
->sdiv
;
34729 cost
= n
* rs6000_cost
->ddiv
;
34736 cost
= COSTS_N_INSNS (n
+ 2);
34740 cost
= COSTS_N_INSNS (n
);
34746 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34749 rs6000_debug_address_cost (rtx x
, machine_mode mode
,
34750 addr_space_t as
, bool speed
)
34752 int ret
= TARGET_ADDRESS_COST (x
, mode
, as
, speed
);
34754 fprintf (stderr
, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34755 ret
, speed
? "true" : "false");
34762 /* A C expression returning the cost of moving data from a register of class
34763 CLASS1 to one of CLASS2. */
34766 rs6000_register_move_cost (machine_mode mode
,
34767 reg_class_t from
, reg_class_t to
)
34771 if (TARGET_DEBUG_COST
)
34774 /* Moves from/to GENERAL_REGS. */
34775 if (reg_classes_intersect_p (to
, GENERAL_REGS
)
34776 || reg_classes_intersect_p (from
, GENERAL_REGS
))
34778 reg_class_t rclass
= from
;
34780 if (! reg_classes_intersect_p (to
, GENERAL_REGS
))
34783 if (rclass
== FLOAT_REGS
|| rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
34784 ret
= (rs6000_memory_move_cost (mode
, rclass
, false)
34785 + rs6000_memory_move_cost (mode
, GENERAL_REGS
, false));
34787 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34789 else if (rclass
== CR_REGS
)
34792 /* For those processors that have slow LR/CTR moves, make them more
34793 expensive than memory in order to bias spills to memory .*/
34794 else if ((rs6000_tune
== PROCESSOR_POWER6
34795 || rs6000_tune
== PROCESSOR_POWER7
34796 || rs6000_tune
== PROCESSOR_POWER8
34797 || rs6000_tune
== PROCESSOR_POWER9
)
34798 && reg_classes_intersect_p (rclass
, LINK_OR_CTR_REGS
))
34799 ret
= 6 * hard_regno_nregs (0, mode
);
34802 /* A move will cost one instruction per GPR moved. */
34803 ret
= 2 * hard_regno_nregs (0, mode
);
34806 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34807 else if (VECTOR_MEM_VSX_P (mode
)
34808 && reg_classes_intersect_p (to
, VSX_REGS
)
34809 && reg_classes_intersect_p (from
, VSX_REGS
))
34810 ret
= 2 * hard_regno_nregs (FIRST_FPR_REGNO
, mode
);
34812 /* Moving between two similar registers is just one instruction. */
34813 else if (reg_classes_intersect_p (to
, from
))
34814 ret
= (FLOAT128_2REG_P (mode
)) ? 4 : 2;
34816 /* Everything else has to go through GENERAL_REGS. */
34818 ret
= (rs6000_register_move_cost (mode
, GENERAL_REGS
, to
)
34819 + rs6000_register_move_cost (mode
, from
, GENERAL_REGS
));
34821 if (TARGET_DEBUG_COST
)
34823 if (dbg_cost_ctrl
== 1)
34825 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34826 ret
, GET_MODE_NAME (mode
), reg_class_names
[from
],
34827 reg_class_names
[to
]);
34834 /* A C expressions returning the cost of moving data of MODE from a register to
34838 rs6000_memory_move_cost (machine_mode mode
, reg_class_t rclass
,
34839 bool in ATTRIBUTE_UNUSED
)
34843 if (TARGET_DEBUG_COST
)
34846 if (reg_classes_intersect_p (rclass
, GENERAL_REGS
))
34847 ret
= 4 * hard_regno_nregs (0, mode
);
34848 else if ((reg_classes_intersect_p (rclass
, FLOAT_REGS
)
34849 || reg_classes_intersect_p (rclass
, VSX_REGS
)))
34850 ret
= 4 * hard_regno_nregs (32, mode
);
34851 else if (reg_classes_intersect_p (rclass
, ALTIVEC_REGS
))
34852 ret
= 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO
, mode
);
34854 ret
= 4 + rs6000_register_move_cost (mode
, rclass
, GENERAL_REGS
);
34856 if (TARGET_DEBUG_COST
)
34858 if (dbg_cost_ctrl
== 1)
34860 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34861 ret
, GET_MODE_NAME (mode
), reg_class_names
[rclass
], in
);
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (tree fndecl)
{
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case VSX_BUILTIN_XVSQRTDP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

    case VSX_BUILTIN_XVSQRTSP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

    default:
      return NULL_TREE;
    }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = const_double_from_real_value (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = const_double_from_real_value (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = const_double_from_real_value (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (dst, r));
}
34964 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34965 add a reg_note saying that this was a division. Support both scalar and
34966 vector divide. Assumes no trapping math and finite arguments. */
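/* Concretely, the code below refines a reciprocal estimate x ~= 1/d with
   e = 1 - d*x and x' = x + e*x, each step roughly doubling the number of
   correct bits, and the last step folds in the numerator:
   u = n*x, v = n - d*u, dst = u + v*x = n*x*(2 - d*x).  */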
34969 rs6000_emit_swdiv (rtx dst
, rtx n
, rtx d
, bool note_p
)
34971 machine_mode mode
= GET_MODE (dst
);
34972 rtx one
, x0
, e0
, x1
, xprev
, eprev
, xnext
, enext
, u
, v
;
34975 /* Low precision estimates guarantee 5 bits of accuracy. High
34976 precision estimates guarantee 14 bits of accuracy. SFmode
34977 requires 23 bits of accuracy. DFmode requires 52 bits of
34978 accuracy. Each pass at least doubles the accuracy, leading
34979 to the following. */
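  /* For example: a 14-bit estimate reaches about 28 correct bits after a
     single pass, already past SFmode's 23, while a 5-bit estimate needs
     three passes (5 -> 10 -> 20 -> 40); DFmode's 52 bits then call for the
     extra passes added just below.  */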
34980 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
34981 if (mode
== DFmode
|| mode
== V2DFmode
)
34984 enum insn_code code
= optab_handler (smul_optab
, mode
);
34985 insn_gen_fn gen_mul
= GEN_FCN (code
);
34987 gcc_assert (code
!= CODE_FOR_nothing
);
34989 one
= rs6000_load_constant_and_splat (mode
, dconst1
);
34991 /* x0 = 1./d estimate */
34992 x0
= gen_reg_rtx (mode
);
34993 emit_insn (gen_rtx_SET (x0
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, d
),
34996 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34999 /* e0 = 1. - d * x0 */
35000 e0
= gen_reg_rtx (mode
);
35001 rs6000_emit_nmsub (e0
, d
, x0
, one
);
35003 /* x1 = x0 + e0 * x0 */
35004 x1
= gen_reg_rtx (mode
);
35005 rs6000_emit_madd (x1
, e0
, x0
, x0
);
35007 for (i
= 0, xprev
= x1
, eprev
= e0
; i
< passes
- 2;
35008 ++i
, xprev
= xnext
, eprev
= enext
) {
35010 /* enext = eprev * eprev */
35011 enext
= gen_reg_rtx (mode
);
35012 emit_insn (gen_mul (enext
, eprev
, eprev
));
35014 /* xnext = xprev + enext * xprev */
35015 xnext
= gen_reg_rtx (mode
);
35016 rs6000_emit_madd (xnext
, enext
, xprev
, xprev
);
35022 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35024 /* u = n * xprev */
35025 u
= gen_reg_rtx (mode
);
35026 emit_insn (gen_mul (u
, n
, xprev
));
35028 /* v = n - (d * u) */
35029 v
= gen_reg_rtx (mode
);
35030 rs6000_emit_nmsub (v
, d
, u
, n
);
35032 /* dst = (v * xprev) + u */
35033 rs6000_emit_madd (dst
, v
, xprev
, u
);
35036 add_reg_note (get_last_insn (), REG_EQUAL
, gen_rtx_DIV (mode
, n
, d
));
35039 /* Goldschmidt's Algorithm for single/double-precision floating point
35040 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
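/* In outline: starting from e ~= 1/sqrt(src), the code below keeps
   g = e*src ~= sqrt(src) and h = e/2 ~= 1/(2*sqrt(src)); each pass computes
   t = 1/2 - g*h and updates g += g*t, h += h*t, so g converges to sqrt(src)
   and 2*h to 1/sqrt(src).  */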
35043 rs6000_emit_swsqrt (rtx dst
, rtx src
, bool recip
)
35045 machine_mode mode
= GET_MODE (src
);
35046 rtx e
= gen_reg_rtx (mode
);
35047 rtx g
= gen_reg_rtx (mode
);
35048 rtx h
= gen_reg_rtx (mode
);
35050 /* Low precision estimates guarantee 5 bits of accuracy. High
35051 precision estimates guarantee 14 bits of accuracy. SFmode
35052 requires 23 bits of accuracy. DFmode requires 52 bits of
35053 accuracy. Each pass at least doubles the accuracy, leading
35054 to the following. */
35055 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
35056 if (mode
== DFmode
|| mode
== V2DFmode
)
35061 enum insn_code code
= optab_handler (smul_optab
, mode
);
35062 insn_gen_fn gen_mul
= GEN_FCN (code
);
35064 gcc_assert (code
!= CODE_FOR_nothing
);
35066 mhalf
= rs6000_load_constant_and_splat (mode
, dconsthalf
);
35068 /* e = rsqrt estimate */
35069 emit_insn (gen_rtx_SET (e
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, src
),
35072 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35075 rtx zero
= force_reg (mode
, CONST0_RTX (mode
));
35077 if (mode
== SFmode
)
35079 rtx target
= emit_conditional_move (e
, GT
, src
, zero
, mode
,
35082 emit_move_insn (e
, target
);
35086 rtx cond
= gen_rtx_GT (VOIDmode
, e
, zero
);
35087 rs6000_emit_vector_cond_expr (e
, e
, zero
, cond
, src
, zero
);
35091 /* g = sqrt estimate. */
35092 emit_insn (gen_mul (g
, e
, src
));
35093 /* h = 1/(2*sqrt) estimate. */
35094 emit_insn (gen_mul (h
, e
, mhalf
));
35100 rtx t
= gen_reg_rtx (mode
);
35101 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
35102 /* Apply correction directly to 1/rsqrt estimate. */
35103 rs6000_emit_madd (dst
, e
, t
, e
);
35107 for (i
= 0; i
< passes
; i
++)
35109 rtx t1
= gen_reg_rtx (mode
);
35110 rtx g1
= gen_reg_rtx (mode
);
35111 rtx h1
= gen_reg_rtx (mode
);
35113 rs6000_emit_nmsub (t1
, g
, h
, mhalf
);
35114 rs6000_emit_madd (g1
, g
, t1
, g
);
35115 rs6000_emit_madd (h1
, h
, t1
, h
);
35120 /* Multiply by 2 for 1/rsqrt. */
35121 emit_insn (gen_add3_insn (dst
, h
, h
));
35126 rtx t
= gen_reg_rtx (mode
);
35127 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
35128 rs6000_emit_madd (dst
, g
, t
, g
);
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */
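/* Without popcntw/popcntd, the fallback below relies on popcntb, which
   leaves the population count of each byte in that byte.  Multiplying by
   0x01010101 (or 0x0101010101010101 for DImode) accumulates all the byte
   counts into the most significant byte -- the total is at most 32 (or 64),
   so no byte overflows -- and the final shift right by 24 (or 56) extracts
   that sum.  */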
void
rs6000_emit_popcount (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */
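/* When the ISA 2.05 prty instructions are not available, the fallback below
   starts from the per-byte counts produced by popcntb and either multiplies
   as in rs6000_emit_popcount, or, when a multiply is at least as expensive
   as the shift/xor sequence, folds the bytes together (shift right 16, xor,
   shift right 8, xor for SImode) and masks the low bit to get the parity.  */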
35180 rs6000_emit_parity (rtx dst
, rtx src
)
35182 machine_mode mode
= GET_MODE (dst
);
35185 tmp
= gen_reg_rtx (mode
);
35187 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35190 if (mode
== SImode
)
35192 emit_insn (gen_popcntbsi2 (tmp
, src
));
35193 emit_insn (gen_paritysi2_cmpb (dst
, tmp
));
35197 emit_insn (gen_popcntbdi2 (tmp
, src
));
35198 emit_insn (gen_paritydi2_cmpb (dst
, tmp
));
35203 if (mode
== SImode
)
35205 /* Is mult+shift >= shift+xor+shift+xor? */
35206 if (rs6000_cost
->mulsi_const
>= COSTS_N_INSNS (3))
35208 rtx tmp1
, tmp2
, tmp3
, tmp4
;
35210 tmp1
= gen_reg_rtx (SImode
);
35211 emit_insn (gen_popcntbsi2 (tmp1
, src
));
35213 tmp2
= gen_reg_rtx (SImode
);
35214 emit_insn (gen_lshrsi3 (tmp2
, tmp1
, GEN_INT (16)));
35215 tmp3
= gen_reg_rtx (SImode
);
35216 emit_insn (gen_xorsi3 (tmp3
, tmp1
, tmp2
));
35218 tmp4
= gen_reg_rtx (SImode
);
35219 emit_insn (gen_lshrsi3 (tmp4
, tmp3
, GEN_INT (8)));
35220 emit_insn (gen_xorsi3 (tmp
, tmp3
, tmp4
));
35223 rs6000_emit_popcount (tmp
, src
);
35224 emit_insn (gen_andsi3 (dst
, tmp
, const1_rtx
));
35228 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35229 if (rs6000_cost
->muldi
>= COSTS_N_INSNS (5))
35231 rtx tmp1
, tmp2
, tmp3
, tmp4
, tmp5
, tmp6
;
35233 tmp1
= gen_reg_rtx (DImode
);
35234 emit_insn (gen_popcntbdi2 (tmp1
, src
));
35236 tmp2
= gen_reg_rtx (DImode
);
35237 emit_insn (gen_lshrdi3 (tmp2
, tmp1
, GEN_INT (32)));
35238 tmp3
= gen_reg_rtx (DImode
);
35239 emit_insn (gen_xordi3 (tmp3
, tmp1
, tmp2
));
35241 tmp4
= gen_reg_rtx (DImode
);
35242 emit_insn (gen_lshrdi3 (tmp4
, tmp3
, GEN_INT (16)));
35243 tmp5
= gen_reg_rtx (DImode
);
35244 emit_insn (gen_xordi3 (tmp5
, tmp3
, tmp4
));
35246 tmp6
= gen_reg_rtx (DImode
);
35247 emit_insn (gen_lshrdi3 (tmp6
, tmp5
, GEN_INT (8)));
35248 emit_insn (gen_xordi3 (tmp
, tmp5
, tmp6
));
35251 rs6000_emit_popcount (tmp
, src
);
35252 emit_insn (gen_anddi3 (dst
, tmp
, const1_rtx
));
35256 /* Expand an Altivec constant permutation for little endian mode.
35257 OP0 and OP1 are the input vectors and TARGET is the output vector.
35258 SEL specifies the constant permutation vector.
35260 There are two issues: First, the two input operands must be
35261 swapped so that together they form a double-wide array in LE
35262 order. Second, the vperm instruction has surprising behavior
35263 in LE mode: it interprets the elements of the source vectors
35264 in BE mode ("left to right") and interprets the elements of
35265 the destination vector in LE mode ("right to left"). To
35266 correct for this, we must subtract each element of the permute
35267 control vector from 31.
35269 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35270 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35271 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35272 serve as the permute control vector. Then, in BE mode,
35276 places the desired result in vr9. However, in LE mode the
35277 vector contents will be
35279 vr10 = 00000003 00000002 00000001 00000000
35280 vr11 = 00000007 00000006 00000005 00000004
35282 The result of the vperm using the same permute control vector is
35284 vr9 = 05000000 07000000 01000000 03000000
35286 That is, the leftmost 4 bytes of vr10 are interpreted as the
35287 source for the rightmost 4 bytes of vr9, and so on.
35289 If we change the permute control vector to
35291 vr12 = {31,20,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35299 vr9 = 00000006 00000004 00000002 00000000. */
35302 altivec_expand_vec_perm_const_le (rtx target
, rtx op0
, rtx op1
,
35303 const vec_perm_indices
&sel
)
35307 rtx constv
, unspec
;
35309 /* Unpack and adjust the constant selector. */
35310 for (i
= 0; i
< 16; ++i
)
35312 unsigned int elt
= 31 - (sel
[i
] & 31);
35313 perm
[i
] = GEN_INT (elt
);
35316 /* Expand to a permute, swapping the inputs and using the
35317 adjusted selector. */
35319 op0
= force_reg (V16QImode
, op0
);
35321 op1
= force_reg (V16QImode
, op1
);
35323 constv
= gen_rtx_CONST_VECTOR (V16QImode
, gen_rtvec_v (16, perm
));
35324 constv
= force_reg (V16QImode
, constv
);
35325 unspec
= gen_rtx_UNSPEC (V16QImode
, gen_rtvec (3, op1
, op0
, constv
),
35327 if (!REG_P (target
))
35329 rtx tmp
= gen_reg_rtx (V16QImode
);
35330 emit_move_insn (tmp
, unspec
);
35334 emit_move_insn (target
, unspec
);
35337 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35338 permute control vector. But here it's not a constant, so we must
35339 generate a vector NAND or NOR to do the adjustment. */
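/* This works because vperm only uses the low five bits of each selector
   byte, and within those bits the bitwise complement of a value x is
   exactly 31 - x, the same adjustment applied in the constant case.  */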
35342 altivec_expand_vec_perm_le (rtx operands
[4])
35344 rtx notx
, iorx
, unspec
;
35345 rtx target
= operands
[0];
35346 rtx op0
= operands
[1];
35347 rtx op1
= operands
[2];
35348 rtx sel
= operands
[3];
35350 rtx norreg
= gen_reg_rtx (V16QImode
);
35351 machine_mode mode
= GET_MODE (target
);
35353 /* Get everything in regs so the pattern matches. */
35355 op0
= force_reg (mode
, op0
);
35357 op1
= force_reg (mode
, op1
);
35359 sel
= force_reg (V16QImode
, sel
);
35360 if (!REG_P (target
))
35361 tmp
= gen_reg_rtx (mode
);
35363 if (TARGET_P9_VECTOR
)
35365 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op1
, op0
, sel
),
35370 /* Invert the selector with a VNAND if available, else a VNOR.
35371 The VNAND is preferred for future fusion opportunities. */
35372 notx
= gen_rtx_NOT (V16QImode
, sel
);
35373 iorx
= (TARGET_P8_VECTOR
35374 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
35375 : gen_rtx_AND (V16QImode
, notx
, notx
));
35376 emit_insn (gen_rtx_SET (norreg
, iorx
));
35378 /* Permute with operands reversed and adjusted selector. */
35379 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op1
, op0
, norreg
),
35383 /* Copy into target, possibly by way of a register. */
35384 if (!REG_P (target
))
35386 emit_move_insn (tmp
, unspec
);
35390 emit_move_insn (target
, unspec
);
35393 /* Expand an Altivec constant permutation. Return true if we match
35394 an efficient implementation; false to fall back to VPERM.
35396 OP0 and OP1 are the input vectors and TARGET is the output vector.
35397 SEL specifies the constant permutation vector. */
35400 altivec_expand_vec_perm_const (rtx target
, rtx op0
, rtx op1
,
35401 const vec_perm_indices
&sel
)
35403 struct altivec_perm_insn
{
35404 HOST_WIDE_INT mask
;
35405 enum insn_code impl
;
35406 unsigned char perm
[16];
35408 static const struct altivec_perm_insn patterns
[] = {
35409 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhum_direct
,
35410 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35411 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwum_direct
,
35412 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35413 { OPTION_MASK_ALTIVEC
,
35414 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghb_direct
35415 : CODE_FOR_altivec_vmrglb_direct
),
35416 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35417 { OPTION_MASK_ALTIVEC
,
35418 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghh_direct
35419 : CODE_FOR_altivec_vmrglh_direct
),
35420 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35421 { OPTION_MASK_ALTIVEC
,
35422 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghw_direct
35423 : CODE_FOR_altivec_vmrglw_direct
),
35424 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35425 { OPTION_MASK_ALTIVEC
,
35426 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglb_direct
35427 : CODE_FOR_altivec_vmrghb_direct
),
35428 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35429 { OPTION_MASK_ALTIVEC
,
35430 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglh_direct
35431 : CODE_FOR_altivec_vmrghh_direct
),
35432 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35433 { OPTION_MASK_ALTIVEC
,
35434 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglw_direct
35435 : CODE_FOR_altivec_vmrghw_direct
),
35436 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35437 { OPTION_MASK_P8_VECTOR
,
35438 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgew_v4sf_direct
35439 : CODE_FOR_p8_vmrgow_v4sf_direct
),
35440 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35441 { OPTION_MASK_P8_VECTOR
,
35442 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgow_v4sf_direct
35443 : CODE_FOR_p8_vmrgew_v4sf_direct
),
35444 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35447 unsigned int i
, j
, elt
, which
;
35448 unsigned char perm
[16];
35452 /* Unpack the constant selector. */
35453 for (i
= which
= 0; i
< 16; ++i
)
35456 which
|= (elt
< 16 ? 1 : 2);
35460 /* Simplify the constant selector based on operands. */
35464 gcc_unreachable ();
35468 if (!rtx_equal_p (op0
, op1
))
35473 for (i
= 0; i
< 16; ++i
)
35485 /* Look for splat patterns. */
35490 for (i
= 0; i
< 16; ++i
)
35491 if (perm
[i
] != elt
)
35495 if (!BYTES_BIG_ENDIAN
)
35497 emit_insn (gen_altivec_vspltb_direct (target
, op0
, GEN_INT (elt
)));
35503 for (i
= 0; i
< 16; i
+= 2)
35504 if (perm
[i
] != elt
|| perm
[i
+ 1] != elt
+ 1)
35508 int field
= BYTES_BIG_ENDIAN
? elt
/ 2 : 7 - elt
/ 2;
35509 x
= gen_reg_rtx (V8HImode
);
35510 emit_insn (gen_altivec_vsplth_direct (x
, gen_lowpart (V8HImode
, op0
),
35512 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35519 for (i
= 0; i
< 16; i
+= 4)
35521 || perm
[i
+ 1] != elt
+ 1
35522 || perm
[i
+ 2] != elt
+ 2
35523 || perm
[i
+ 3] != elt
+ 3)
35527 int field
= BYTES_BIG_ENDIAN
? elt
/ 4 : 3 - elt
/ 4;
35528 x
= gen_reg_rtx (V4SImode
);
35529 emit_insn (gen_altivec_vspltw_direct (x
, gen_lowpart (V4SImode
, op0
),
35531 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35537 /* Look for merge and pack patterns. */
35538 for (j
= 0; j
< ARRAY_SIZE (patterns
); ++j
)
35542 if ((patterns
[j
].mask
& rs6000_isa_flags
) == 0)
35545 elt
= patterns
[j
].perm
[0];
35546 if (perm
[0] == elt
)
35548 else if (perm
[0] == elt
+ 16)
35552 for (i
= 1; i
< 16; ++i
)
35554 elt
= patterns
[j
].perm
[i
];
35556 elt
= (elt
>= 16 ? elt
- 16 : elt
+ 16);
35557 else if (one_vec
&& elt
>= 16)
35559 if (perm
[i
] != elt
)
35564 enum insn_code icode
= patterns
[j
].impl
;
35565 machine_mode omode
= insn_data
[icode
].operand
[0].mode
;
35566 machine_mode imode
= insn_data
[icode
].operand
[1].mode
;
35568 /* For little-endian, don't use vpkuwum and vpkuhum if the
35569 underlying vector type is not V4SI and V8HI, respectively.
35570 For example, using vpkuwum with a V8HI picks up the even
35571 halfwords (BE numbering) when the even halfwords (LE
35572 numbering) are what we need. */
35573 if (!BYTES_BIG_ENDIAN
35574 && icode
== CODE_FOR_altivec_vpkuwum_direct
35575 && ((GET_CODE (op0
) == REG
35576 && GET_MODE (op0
) != V4SImode
)
35577 || (GET_CODE (op0
) == SUBREG
35578 && GET_MODE (XEXP (op0
, 0)) != V4SImode
)))
35580 if (!BYTES_BIG_ENDIAN
35581 && icode
== CODE_FOR_altivec_vpkuhum_direct
35582 && ((GET_CODE (op0
) == REG
35583 && GET_MODE (op0
) != V8HImode
)
35584 || (GET_CODE (op0
) == SUBREG
35585 && GET_MODE (XEXP (op0
, 0)) != V8HImode
)))
35588 /* For little-endian, the two input operands must be swapped
35589 (or swapped back) to ensure proper right-to-left numbering
35591 if (swapped
^ !BYTES_BIG_ENDIAN
)
35592 std::swap (op0
, op1
);
35593 if (imode
!= V16QImode
)
35595 op0
= gen_lowpart (imode
, op0
);
35596 op1
= gen_lowpart (imode
, op1
);
35598 if (omode
== V16QImode
)
35601 x
= gen_reg_rtx (omode
);
35602 emit_insn (GEN_FCN (icode
) (x
, op0
, op1
));
35603 if (omode
!= V16QImode
)
35604 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35609 if (!BYTES_BIG_ENDIAN
)
35611 altivec_expand_vec_perm_const_le (target
, op0
, op1
, sel
);
35618 /* Expand a VSX Permute Doubleword constant permutation.
35619 Return true if we match an efficient implementation. */
35622 rs6000_expand_vec_perm_const_1 (rtx target
, rtx op0
, rtx op1
,
35623 unsigned char perm0
, unsigned char perm1
)
35627 /* If both selectors come from the same operand, fold to single op. */
35628 if ((perm0
& 2) == (perm1
& 2))
35635 /* If both operands are equal, fold to simpler permutation. */
35636 if (rtx_equal_p (op0
, op1
))
35639 perm1
= (perm1
& 1) + 2;
35641 /* If the first selector comes from the second operand, swap. */
35642 else if (perm0
& 2)
35648 std::swap (op0
, op1
);
35650 /* If the second selector does not come from the second operand, fail. */
35651 else if ((perm1
& 2) == 0)
35655 if (target
!= NULL
)
35657 machine_mode vmode
, dmode
;
35660 vmode
= GET_MODE (target
);
35661 gcc_assert (GET_MODE_NUNITS (vmode
) == 2);
35662 dmode
= mode_for_vector (GET_MODE_INNER (vmode
), 4).require ();
35663 x
= gen_rtx_VEC_CONCAT (dmode
, op0
, op1
);
35664 v
= gen_rtvec (2, GEN_INT (perm0
), GEN_INT (perm1
));
35665 x
= gen_rtx_VEC_SELECT (vmode
, x
, gen_rtx_PARALLEL (VOIDmode
, v
));
35666 emit_insn (gen_rtx_SET (target
, x
));
35671 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35674 rs6000_vectorize_vec_perm_const (machine_mode vmode
, rtx target
, rtx op0
,
35675 rtx op1
, const vec_perm_indices
&sel
)
35677 bool testing_p
= !target
;
35679 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35680 if (TARGET_ALTIVEC
&& testing_p
)
35683 /* Check for ps_merge* or xxpermdi insns. */
35684 if ((vmode
== V2DFmode
|| vmode
== V2DImode
) && VECTOR_MEM_VSX_P (vmode
))
35688 op0
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 1);
35689 op1
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 2);
35691 if (rs6000_expand_vec_perm_const_1 (target
, op0
, op1
, sel
[0], sel
[1]))
35695 if (TARGET_ALTIVEC
)
35697 /* Force the target-independent code to lower to V16QImode. */
35698 if (vmode
!= V16QImode
)
35700 if (altivec_expand_vec_perm_const (target
, op0
, op1
, sel
))
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
   OP0 and OP1 are the input vectors and TARGET is the output vector.
   PERM specifies the constant permutation vector.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
			   machine_mode vmode, const vec_perm_builder &perm)
{
  rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
  if (x != target)
    emit_move_insn (target, x);
}

/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  vec_perm_builder perm (nelt, nelt, 1);

  for (i = 0; i < nelt; i++)
    perm.quick_push (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
}

/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  vec_perm_builder perm (nelt, nelt, 1);

  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm.quick_push (i + high);
      perm.quick_push (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
}
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */

void
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
{
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);
  rtx elt;
  rtx scale_vec = gen_reg_rtx (V2DFmode);
  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = const_double_from_real_value (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
}
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);

  if (TARGET_FLOAT128_TYPE
      && (mode == KCmode
	  || (mode == TCmode && TARGET_IEEEQUAD)))
    regno = ALTIVEC_ARG_RETURN;

  else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;

  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
/* Return an rtx describing a return value of MODE as a PARALLEL
   in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
   stride REG_STRIDE.  */

static rtx
rs6000_parallel_return (machine_mode mode,
			int n_elts, machine_mode elt_mode,
			unsigned int regno, unsigned int reg_stride)
{
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i;

  for (i = 0; i < n_elts; i++)
    {
      rtx r = gen_rtx_REG (elt_mode, regno);
      rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
      XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
      regno += reg_stride;
    }

  return par;
}
/* Target hook for TARGET_FUNCTION_VALUE.

   An integer value is in r3 and a floating-point value is in fp1,
   unless -msoft-float.  */
35837 rs6000_function_value (const_tree valtype
,
35838 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
35839 bool outgoing ATTRIBUTE_UNUSED
)
35842 unsigned int regno
;
35843 machine_mode elt_mode
;
35846 /* Special handling for structs in darwin64. */
35848 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype
), valtype
))
35850 CUMULATIVE_ARGS valcum
;
35854 valcum
.fregno
= FP_ARG_MIN_REG
;
35855 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
35856 /* Do a trial code generation as if this were going to be passed as
35857 an argument; if any part goes in memory, we return NULL. */
35858 valret
= rs6000_darwin64_record_arg (&valcum
, valtype
, true, /* retval= */ true);
35861 /* Otherwise fall through to standard ABI rules. */
35864 mode
= TYPE_MODE (valtype
);
35866 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35867 if (rs6000_discover_homogeneous_aggregate (mode
, valtype
, &elt_mode
, &n_elts
))
35869 int first_reg
, n_regs
;
35871 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode
))
35873 /* _Decimal128 must use even/odd register pairs. */
35874 first_reg
= (elt_mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35875 n_regs
= (GET_MODE_SIZE (elt_mode
) + 7) >> 3;
35879 first_reg
= ALTIVEC_ARG_RETURN
;
35883 return rs6000_parallel_return (mode
, n_elts
, elt_mode
, first_reg
, n_regs
);
35886 /* Some return value types need be split in -mpowerpc64, 32bit ABI. */
35887 if (TARGET_32BIT
&& TARGET_POWERPC64
)
35896 int count
= GET_MODE_SIZE (mode
) / 4;
35897 return rs6000_parallel_return (mode
, count
, SImode
, GP_ARG_RETURN
, 1);
35900 if ((INTEGRAL_TYPE_P (valtype
)
35901 && GET_MODE_BITSIZE (mode
) < (TARGET_32BIT
? 32 : 64))
35902 || POINTER_TYPE_P (valtype
))
35903 mode
= TARGET_32BIT
? SImode
: DImode
;
35905 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
35906 /* _Decimal128 must use an even/odd register pair. */
35907 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35908 else if (SCALAR_FLOAT_TYPE_P (valtype
) && TARGET_HARD_FLOAT
35909 && !FLOAT128_VECTOR_P (mode
))
35910 regno
= FP_ARG_RETURN
;
35911 else if (TREE_CODE (valtype
) == COMPLEX_TYPE
35912 && targetm
.calls
.split_complex_arg
)
35913 return rs6000_complex_function_value (mode
);
35914 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35915 return register is used in both cases, and we won't see V2DImode/V2DFmode
35916 for pure altivec, combine the two cases. */
35917 else if ((TREE_CODE (valtype
) == VECTOR_TYPE
|| FLOAT128_VECTOR_P (mode
))
35918 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
35919 && ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
35920 regno
= ALTIVEC_ARG_RETURN
;
35922 regno
= GP_ARG_RETURN
;
35924 return gen_rtx_REG (mode
, regno
);
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

static rtx
rs6000_libcall_value (machine_mode mode)
{
  unsigned int regno;

  /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Compute register pressure classes.  We implement the target hook to avoid
   IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
   lead to incorrect estimates of the number of available registers and
   therefore increased register pressure/spill.  */

static int
rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
{
  int n;

  n = 0;
  pressure_classes[n++] = GENERAL_REGS;
  if (TARGET_VSX)
    pressure_classes[n++] = VSX_REGS;
  else
    {
      if (TARGET_ALTIVEC)
	pressure_classes[n++] = ALTIVEC_REGS;
      if (TARGET_HARD_FLOAT)
	pressure_classes[n++] = FLOAT_REGS;
    }
  pressure_classes[n++] = CR_REGS;
  pressure_classes[n++] = SPECIAL_REGS;

  return n;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
          ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || constant_pool_empty_p ()
          : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
/* Fill in sizes of registers used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_MACHO && ! TARGET_ALTIVEC)
    {
      int i;
      machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
         The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN
                (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
          HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}
/* Map internal gcc register numbers to debug format register numbers.
   FORMAT specifies the type of debug register number to use:
     0 -- debug information, except for frame-related sections
     1 -- DWARF .debug_frame section
     2 -- DWARF .eh_frame section  */

unsigned int
rs6000_dbx_register_number (unsigned int regno, unsigned int format)
{
  /* Except for the above, we use the internal number for non-DWARF
     debug information, and also for .eh_frame.  */
  if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
    return regno;

  /* On some platforms, we use the standard DWARF register
     numbering for .debug_info and .debug_frame.  */
#ifdef RS6000_USE_DWARF_NUMBERING
36081 if (regno
== LR_REGNO
)
36083 if (regno
== CTR_REGNO
)
36085 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36086 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36087 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36088 to the DWARF reg for CR. */
36089 if (format
== 1 && regno
== CR2_REGNO
)
36091 if (CR_REGNO_P (regno
))
36092 return regno
- CR0_REGNO
+ 86;
36093 if (regno
== CA_REGNO
)
36094 return 101; /* XER */
36095 if (ALTIVEC_REGNO_P (regno
))
36096 return regno
- FIRST_ALTIVEC_REGNO
+ 1124;
36097 if (regno
== VRSAVE_REGNO
)
36099 if (regno
== VSCR_REGNO
)
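
/* Worked example (added for illustration; not part of the original source):
   under RS6000_USE_DWARF_NUMBERING, a condition-register field CRn is mapped
   to n + 86, an AltiVec register vN to N + 1124, and the carry bit (CA) is
   reported as the XER, number 101, per the mappings above.  */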
/* target hook eh_return_filter_mode */
static scalar_int_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}

/* Target hook for translate_mode_attribute.  */
static machine_mode
rs6000_translate_mode_attribute (machine_mode mode)
{
  if ((FLOAT128_IEEE_P (mode)
       && ieee128_float_type_node == long_double_type_node)
      || (FLOAT128_IBM_P (mode)
          && ibm128_float_type_node == long_double_type_node))
    return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
  return mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (scalar_mode mode)
{
  /* -m32 does not support TImode.  This is the default, from
     default_scalar_mode_supported_p.  For -m32 -mpowerpc64 we want the
     same ABI as for -m32.  But default_scalar_mode_supported_p allows
     integer modes of precision 2 * BITS_PER_WORD, which matches TImode
     for -mpowerpc64.  */
  if (TARGET_32BIT && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}

/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (machine_mode mode)
{
  /* There is no vector form for IEEE 128-bit.  If we return true for IEEE
     128-bit, the compiler might try to widen IEEE 128-bit to IBM
     double-double.  */
  if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
    return true;
  else
    return false;
}

/* Target hook for floatn_mode.  */
static opt_scalar_float_mode
rs6000_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      switch (n)
        {
        case 32:
          return DFmode;
        case 64:
          if (TARGET_FLOAT128_TYPE)
            return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
          else
            return opt_scalar_float_mode ();
        case 128:
          return opt_scalar_float_mode ();
        default:
          /* Those are the only valid _FloatNx types.  */
          gcc_unreachable ();
        }
    }
  else
    {
      switch (n)
        {
        case 32:
          return SFmode;
        case 64:
          return DFmode;
        case 128:
          if (TARGET_FLOAT128_TYPE)
            return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
          else
            return opt_scalar_float_mode ();
        default:
          return opt_scalar_float_mode ();
        }
    }
}
/* Target hook for c_mode_for_suffix.  */
static machine_mode
rs6000_c_mode_for_suffix (char suffix)
{
  if (TARGET_FLOAT128_TYPE)
    {
      if (suffix == 'q' || suffix == 'Q')
        return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;

      /* At the moment, we are not defining a suffix for IBM extended double.
         If/when the default for -mabi=ieeelongdouble is changed, and we want
         to support __ibm128 constants in legacy library code, we may need to
         re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
         'q' as machine dependent suffixes.  The x86_64 port uses 'w' for
         __float80 constants.  */
    }

  return VOIDmode;
}
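
/* Illustrative use (added; not part of the original source): with
   -mfloat128, a constant such as 1.0q (or 1.0Q) gets KFmode, or TFmode when
   long double is already IEEE 128-bit, per the suffix handling above.  */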
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl,
                                 const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}

/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}

/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
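
/* Worked values (added for illustration; not part of the original source):
   the expression above places the ASAN shadow at 1 << 41 (0x20000000000)
   for 64-bit code and at 1 << 29 (0x20000000) for 32-bit code.  */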
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;             /* option name */
  HOST_WIDE_INT mask;           /* mask to set */
  bool invert;                  /* invert sense of mask */
  bool valid_target;            /* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec", OPTION_MASK_ALTIVEC, false, true },
  { "cmpb", OPTION_MASK_CMPB, false, true },
  { "crypto", OPTION_MASK_CRYPTO, false, true },
  { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
  { "dlmzb", OPTION_MASK_DLMZB, false, true },
  { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
    false, true },
  { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
  { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
  { "fprnd", OPTION_MASK_FPRND, false, true },
  { "hard-dfp", OPTION_MASK_DFP, false, true },
  { "htm", OPTION_MASK_HTM, false, true },
  { "isel", OPTION_MASK_ISEL, false, true },
  { "mfcrf", OPTION_MASK_MFCRF, false, true },
  { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
  { "modulo", OPTION_MASK_MODULO, false, true },
  { "mulhw", OPTION_MASK_MULHW, false, true },
  { "multiple", OPTION_MASK_MULTIPLE, false, true },
  { "popcntb", OPTION_MASK_POPCNTB, false, true },
  { "popcntd", OPTION_MASK_POPCNTD, false, true },
  { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
  { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
  { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
  { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
  { "power9-misc", OPTION_MASK_P9_MISC, false, true },
  { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
  { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
  { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
  { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
  { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
  { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
  { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
  { "string", 0, false, true },
  { "update", OPTION_MASK_NO_UPDATE, true , true },
  { "vsx", OPTION_MASK_VSX, false, true },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64", OPTION_MASK_64BIT, false, false },
  { "aix32", OPTION_MASK_64BIT, true, false },
#else
  { "64", OPTION_MASK_64BIT, false, false },
  { "32", OPTION_MASK_64BIT, true, false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi", OPTION_MASK_EABI, false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
  { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
#endif
  { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
  { "string", 0, false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec", RS6000_BTM_ALTIVEC, false, false },
  { "vsx", RS6000_BTM_VSX, false, false },
  { "fre", RS6000_BTM_FRE, false, false },
  { "fres", RS6000_BTM_FRES, false, false },
  { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
  { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
  { "popcntd", RS6000_BTM_POPCNTD, false, false },
  { "cell", RS6000_BTM_CELL, false, false },
  { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
  { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
  { "power9-misc", RS6000_BTM_P9_MISC, false, false },
  { "crypto", RS6000_BTM_CRYPTO, false, false },
  { "htm", RS6000_BTM_HTM, false, false },
  { "hard-dfp", RS6000_BTM_DFP, false, false },
  { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
  { "long-double-128", RS6000_BTM_LDBL128, false, false },
  { "powerpc64", RS6000_BTM_POWERPC64, false, false },
  { "float128", RS6000_BTM_FLOAT128, false, false },
  { "float128-hw", RS6000_BTM_FLOAT128_HW, false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;             /* option name */
  size_t global_offset;         /* offset of the option in global_options.  */
  size_t target_offset;         /* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
  { "optimize-swaps",
    offsetof (struct gcc_options, x_rs6000_optimize_swaps),
    offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
  { "allow-movmisalign",
    offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
    offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
  { "sched-groups",
    offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
    offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
  { "always-hint",
    offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
    offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
  { "align-branch-targets",
    offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
    offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
  { "tls-markers",
    offsetof (struct gcc_options, x_tls_markers),
    offsetof (struct cl_target_option, x_tls_markers), },
  { "sched-prolog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "sched-epilog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "speculate-indirect-jumps",
    offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
    offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
};
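
/* Illustrative usage (added; not part of the original source): the names in
   the tables above are what a user writes inside the target attribute or
   pragma, e.g.

       __attribute__((target ("vsx,no-multiple"))) int foo (void);
       #pragma GCC target ("cpu=power8,htm")

   A leading "no-" inverts an option, and "cpu="/"tune=" select a processor;
   the strings are parsed by rs6000_inner_target_options below.  */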
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          p = NULL;
          if (strncmp (q, "cpu=", 4) == 0)
            {
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+4;
                }
            }
          else if (strncmp (q, "tune=", 5) == 0)
            {
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;
              else
                {
                  error_p = true;
                  cpu_opt = q+5;
                }
            }
          else
            {
              size_t i;
              bool invert = false;
              char *r = q;

              error_p = true;
              if (strncmp (r, "no-", 3) == 0)
                {
                  invert = true;
                  r += 3;
                }

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                  {
                    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                    else
                      {
                        error_p = false;
                        rs6000_isa_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                           altivec and disables -mavoid-indexed-addresses.  */
                        if (!invert)
                          {
                            if (mask == OPTION_MASK_VSX)
                              {
                                mask |= OPTION_MASK_ALTIVEC;
                                TARGET_AVOID_XFORM = 0;
                              }
                          }

                        if (rs6000_opt_masks[i].invert)
                          invert = !invert;

                        if (invert)
                          rs6000_isa_flags &= ~mask;
                        else
                          rs6000_isa_flags |= mask;
                      }
                    break;
                  }

              if (error_p && !not_valid_p)
                {
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                      {
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;
                        error_p = false;
                        not_valid_p = false;
                        break;
                      }
                }
            }

          if (error_p)
            {
              const char *eprefix, *esuffix;

              ret = false;
              if (attr_p)
                {
                  eprefix = "__attribute__((__target__(";
                  esuffix = ")))";
                }
              else
                {
                  eprefix = "#pragma GCC target ";
                  esuffix = "";
                }

              if (cpu_opt)
                error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
                       q, esuffix);
              else if (not_valid_p)
                error ("%s%qs%s is not allowed", eprefix, q, esuffix);
              else
                error ("%s%qs%s is invalid", eprefix, q, esuffix);
            }
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            {
              bool ret2 = rs6000_inner_target_options (value, attr_p);
              if (!ret2)
                ret = false;
            }
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }

  return ret;
}
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          fprintf (stderr, "%s\"%s\"", prefix, q);
        }
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
        {
          tree value = TREE_VALUE (args);
          if (value)
            rs6000_debug_target_options (value, prefix);
          args = TREE_CHAIN (args);
        }
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),
                          tree args,
                          int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize;
  tree new_target, new_optimize;
  tree func_optimize;

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
      else
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  old_optimize = build_optimization_node (&global_options);
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node (&global_options);
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node (&global_options);

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node (&global_options);
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
        {
          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        }
      else
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
                  ? pop_target
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node (&global_options))
             == NULL_TREE)
        {
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

          return false;
        }
    }

  target_option_current_node = cur_tree;
  rs6000_activate_target_options (target_option_current_node);

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_rs6000_isa_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_rs6000_isa_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
        {
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
        }
    }

  return true;
}
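
/* Example effect (added for illustration; not part of the original source):
   if a pragma switches from a VSX-enabled target to "no-vsx", the isa-flag
   diff computed above makes rs6000_target_modify_macros_ptr first undefine
   the predefined macros tied to the dropped flags (e.g. __VSX__) and then
   define whatever the new flags imply.  */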
36794 /* Remember the last target of rs6000_set_current_function. */
36795 static GTY(()) tree rs6000_previous_fndecl
;
36797 /* Restore target's globals from NEW_TREE and invalidate the
36798 rs6000_previous_fndecl cache. */
36801 rs6000_activate_target_options (tree new_tree
)
36803 cl_target_option_restore (&global_options
, TREE_TARGET_OPTION (new_tree
));
36804 if (TREE_TARGET_GLOBALS (new_tree
))
36805 restore_target_globals (TREE_TARGET_GLOBALS (new_tree
));
36806 else if (new_tree
== target_option_default_node
)
36807 restore_target_globals (&default_target_globals
);
36809 TREE_TARGET_GLOBALS (new_tree
) = save_target_globals_default_opts ();
36810 rs6000_previous_fndecl
= NULL_TREE
;
36813 /* Establish appropriate back-end context for processing the function
36814 FNDECL. The argument might be NULL to indicate processing at top
36815 level, outside of any function scope. */
36817 rs6000_set_current_function (tree fndecl
)
36819 if (TARGET_DEBUG_TARGET
)
36821 fprintf (stderr
, "\n==================== rs6000_set_current_function");
36824 fprintf (stderr
, ", fndecl %s (%p)",
36825 (DECL_NAME (fndecl
)
36826 ? IDENTIFIER_POINTER (DECL_NAME (fndecl
))
36827 : "<unknown>"), (void *)fndecl
);
36829 if (rs6000_previous_fndecl
)
36830 fprintf (stderr
, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl
);
36832 fprintf (stderr
, "\n");
36835 /* Only change the context if the function changes. This hook is called
36836 several times in the course of compiling a function, and we don't want to
36837 slow things down too much or call target_reinit when it isn't safe. */
36838 if (fndecl
== rs6000_previous_fndecl
)
36842 if (rs6000_previous_fndecl
== NULL_TREE
)
36843 old_tree
= target_option_current_node
;
36844 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
))
36845 old_tree
= DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
);
36847 old_tree
= target_option_default_node
;
36850 if (fndecl
== NULL_TREE
)
36852 if (old_tree
!= target_option_current_node
)
36853 new_tree
= target_option_current_node
;
36855 new_tree
= NULL_TREE
;
36859 new_tree
= DECL_FUNCTION_SPECIFIC_TARGET (fndecl
);
36860 if (new_tree
== NULL_TREE
)
36861 new_tree
= target_option_default_node
;
36864 if (TARGET_DEBUG_TARGET
)
36868 fprintf (stderr
, "\nnew fndecl target specific options:\n");
36869 debug_tree (new_tree
);
36874 fprintf (stderr
, "\nold fndecl target specific options:\n");
36875 debug_tree (old_tree
);
36878 if (old_tree
!= NULL_TREE
|| new_tree
!= NULL_TREE
)
36879 fprintf (stderr
, "--------------------\n");
36882 if (new_tree
&& old_tree
!= new_tree
)
36883 rs6000_activate_target_options (new_tree
);
36886 rs6000_previous_fndecl
= fndecl
;
36890 /* Save the current options */
36893 rs6000_function_specific_save (struct cl_target_option
*ptr
,
36894 struct gcc_options
*opts
)
36896 ptr
->x_rs6000_isa_flags
= opts
->x_rs6000_isa_flags
;
36897 ptr
->x_rs6000_isa_flags_explicit
= opts
->x_rs6000_isa_flags_explicit
;
36900 /* Restore the current options */
36903 rs6000_function_specific_restore (struct gcc_options
*opts
,
36904 struct cl_target_option
*ptr
)
36907 opts
->x_rs6000_isa_flags
= ptr
->x_rs6000_isa_flags
;
36908 opts
->x_rs6000_isa_flags_explicit
= ptr
->x_rs6000_isa_flags_explicit
;
36909 (void) rs6000_option_override_internal (false);
36912 /* Print the current options */
36915 rs6000_function_specific_print (FILE *file
, int indent
,
36916 struct cl_target_option
*ptr
)
36918 rs6000_print_isa_options (file
, indent
, "Isa options set",
36919 ptr
->x_rs6000_isa_flags
);
36921 rs6000_print_isa_options (file
, indent
, "Isa options explicit",
36922 ptr
->x_rs6000_isa_flags_explicit
);
36925 /* Helper function to print the current isa or misc options on a line. */
36928 rs6000_print_options_internal (FILE *file
,
36930 const char *string
,
36931 HOST_WIDE_INT flags
,
36932 const char *prefix
,
36933 const struct rs6000_opt_mask
*opts
,
36934 size_t num_elements
)
36937 size_t start_column
= 0;
36939 size_t max_column
= 120;
36940 size_t prefix_len
= strlen (prefix
);
36941 size_t comma_len
= 0;
36942 const char *comma
= "";
36945 start_column
+= fprintf (file
, "%*s", indent
, "");
36949 fprintf (stderr
, DEBUG_FMT_S
, string
, "<none>");
36953 start_column
+= fprintf (stderr
, DEBUG_FMT_WX
, string
, flags
);
36955 /* Print the various mask options. */
36956 cur_column
= start_column
;
36957 for (i
= 0; i
< num_elements
; i
++)
36959 bool invert
= opts
[i
].invert
;
36960 const char *name
= opts
[i
].name
;
36961 const char *no_str
= "";
36962 HOST_WIDE_INT mask
= opts
[i
].mask
;
36963 size_t len
= comma_len
+ prefix_len
+ strlen (name
);
36967 if ((flags
& mask
) == 0)
36970 len
+= sizeof ("no-") - 1;
36978 if ((flags
& mask
) != 0)
36981 len
+= sizeof ("no-") - 1;
36988 if (cur_column
> max_column
)
36990 fprintf (stderr
, ", \\\n%*s", (int)start_column
, "");
36991 cur_column
= start_column
+ len
;
36995 fprintf (file
, "%s%s%s%s", comma
, prefix
, no_str
, name
);
36997 comma_len
= sizeof (", ") - 1;
37000 fputs ("\n", file
);
37003 /* Helper function to print the current isa options on a line. */
37006 rs6000_print_isa_options (FILE *file
, int indent
, const char *string
,
37007 HOST_WIDE_INT flags
)
37009 rs6000_print_options_internal (file
, indent
, string
, flags
, "-m",
37010 &rs6000_opt_masks
[0],
37011 ARRAY_SIZE (rs6000_opt_masks
));
37015 rs6000_print_builtin_options (FILE *file
, int indent
, const char *string
,
37016 HOST_WIDE_INT flags
)
37018 rs6000_print_options_internal (file
, indent
, string
, flags
, "",
37019 &rs6000_builtin_mask_names
[0],
37020 ARRAY_SIZE (rs6000_builtin_mask_names
));
/* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
   2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
   -mupper-regs-df, etc.).

   If the user used -mno-power8-vector, we need to turn off all of the implicit
   ISA 2.07 and 3.0 options that relate to the vector unit.

   If the user used -mno-power9-vector, we need to turn off all of the implicit
   ISA 3.0 options that relate to the vector unit.

   This function does not handle explicit options such as the user specifying
   -mdirect-move.  These are handled in rs6000_option_override_internal, and
   the appropriate error is given if needed.

   We return a mask of all of the implicit options that should not be enabled
   by default.  */

static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void)
{
  HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
  size_t i, j;

  static const struct {
    const HOST_WIDE_INT no_flag;        /* flag explicitly turned off.  */
    const HOST_WIDE_INT dep_flags;      /* flags that depend on this option.  */
    const char *const name;             /* name of the switch.  */
  } flags[] = {
    { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
    { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
    { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
  };

  for (i = 0; i < ARRAY_SIZE (flags); i++)
    {
      HOST_WIDE_INT no_flag = flags[i].no_flag;

      if ((rs6000_isa_flags & no_flag) == 0
          && (rs6000_isa_flags_explicit & no_flag) != 0)
        {
          HOST_WIDE_INT dep_flags = flags[i].dep_flags;
          HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
                                     & rs6000_isa_flags
                                     & dep_flags);

          if (set_flags)
            {
              for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
                if ((set_flags & rs6000_opt_masks[j].mask) != 0)
                  {
                    set_flags &= ~rs6000_opt_masks[j].mask;
                    error ("%<-mno-%s%> turns off %<-m%s%>",
                           flags[i].name,
                           rs6000_opt_masks[j].name);
                  }

              gcc_assert (!set_flags);
            }

          rs6000_isa_flags &= ~dep_flags;
          ignore_masks |= no_flag | dep_flags;
        }
    }

  return ignore_masks;
}
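
/* Example (added for illustration; not part of the original source): if the
   user builds with -mpower9-vector -mno-vsx, the table above makes -mno-vsx
   clear the dependent vector masks, and because -mpower9-vector was given
   explicitly the loop reports "-mno-vsx turns off -mpower9-vector".  */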
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
37108 /* Return the clone id of the target we are compiling code for in a target
37109 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37110 the priority list for the target clones (ordered from lowest to
37114 rs6000_clone_priority (tree fndecl
)
37116 tree fn_opts
= DECL_FUNCTION_SPECIFIC_TARGET (fndecl
);
37117 HOST_WIDE_INT isa_masks
;
37118 int ret
= CLONE_DEFAULT
;
37119 tree attrs
= lookup_attribute ("target", DECL_ATTRIBUTES (fndecl
));
37120 const char *attrs_str
= NULL
;
37122 attrs
= TREE_VALUE (TREE_VALUE (attrs
));
37123 attrs_str
= TREE_STRING_POINTER (attrs
);
37125 /* Return priority zero for default function. Return the ISA needed for the
37126 function if it is not the default. */
37127 if (strcmp (attrs_str
, "default") != 0)
37129 if (fn_opts
== NULL_TREE
)
37130 fn_opts
= target_option_default_node
;
37132 if (!fn_opts
|| !TREE_TARGET_OPTION (fn_opts
))
37133 isa_masks
= rs6000_isa_flags
;
37135 isa_masks
= TREE_TARGET_OPTION (fn_opts
)->x_rs6000_isa_flags
;
37137 for (ret
= CLONE_MAX
- 1; ret
!= 0; ret
--)
37138 if ((rs6000_clone_map
[ret
].isa_mask
& isa_masks
) != 0)
37142 if (TARGET_DEBUG_TARGET
)
37143 fprintf (stderr
, "rs6000_get_function_version_priority (%s) => %d\n",
37144 get_decl_name (fndecl
), ret
);
37149 /* This compares the priority of target features in function DECL1 and DECL2.
37150 It returns positive value if DECL1 is higher priority, negative value if
37151 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37152 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37155 rs6000_compare_version_priority (tree decl1
, tree decl2
)
37157 int priority1
= rs6000_clone_priority (decl1
);
37158 int priority2
= rs6000_clone_priority (decl2
);
37159 int ret
= priority1
- priority2
;
37161 if (TARGET_DEBUG_TARGET
)
37162 fprintf (stderr
, "rs6000_compare_version_priority (%s, %s) => %d\n",
37163 get_decl_name (decl1
), get_decl_name (decl2
), ret
);
37168 /* Make a dispatcher declaration for the multi-versioned function DECL.
37169 Calls to DECL function will be replaced with calls to the dispatcher
37170 by the front-end. Returns the decl of the dispatcher function. */
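
/* Illustrative usage (added; not part of the original source): a multi-
   versioned function that reaches this hook is typically declared with
   something like

       __attribute__ ((target_clones ("cpu=power9,default")))
       long sum (long *a, long n);

   and the front end rewrites calls to it into calls to the dispatcher
   returned here.  */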
37173 rs6000_get_function_versions_dispatcher (void *decl
)
37175 tree fn
= (tree
) decl
;
37176 struct cgraph_node
*node
= NULL
;
37177 struct cgraph_node
*default_node
= NULL
;
37178 struct cgraph_function_version_info
*node_v
= NULL
;
37179 struct cgraph_function_version_info
*first_v
= NULL
;
37181 tree dispatch_decl
= NULL
;
37183 struct cgraph_function_version_info
*default_version_info
= NULL
;
37184 gcc_assert (fn
!= NULL
&& DECL_FUNCTION_VERSIONED (fn
));
37186 if (TARGET_DEBUG_TARGET
)
37187 fprintf (stderr
, "rs6000_get_function_versions_dispatcher (%s)\n",
37188 get_decl_name (fn
));
37190 node
= cgraph_node::get (fn
);
37191 gcc_assert (node
!= NULL
);
37193 node_v
= node
->function_version ();
37194 gcc_assert (node_v
!= NULL
);
37196 if (node_v
->dispatcher_resolver
!= NULL
)
37197 return node_v
->dispatcher_resolver
;
37199 /* Find the default version and make it the first node. */
37201 /* Go to the beginning of the chain. */
37202 while (first_v
->prev
!= NULL
)
37203 first_v
= first_v
->prev
;
37205 default_version_info
= first_v
;
37206 while (default_version_info
!= NULL
)
37208 const tree decl2
= default_version_info
->this_node
->decl
;
37209 if (is_function_default_version (decl2
))
37211 default_version_info
= default_version_info
->next
;
37214 /* If there is no default node, just return NULL. */
37215 if (default_version_info
== NULL
)
37218 /* Make default info the first node. */
37219 if (first_v
!= default_version_info
)
37221 default_version_info
->prev
->next
= default_version_info
->next
;
37222 if (default_version_info
->next
)
37223 default_version_info
->next
->prev
= default_version_info
->prev
;
37224 first_v
->prev
= default_version_info
;
37225 default_version_info
->next
= first_v
;
37226 default_version_info
->prev
= NULL
;
37229 default_node
= default_version_info
->this_node
;
37231 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37232 error_at (DECL_SOURCE_LOCATION (default_node
->decl
),
37233 "target_clones attribute needs GLIBC (2.23 and newer) that "
37234 "exports hardware capability bits");
37237 if (targetm
.has_ifunc_p ())
37239 struct cgraph_function_version_info
*it_v
= NULL
;
37240 struct cgraph_node
*dispatcher_node
= NULL
;
37241 struct cgraph_function_version_info
*dispatcher_version_info
= NULL
;
37243 /* Right now, the dispatching is done via ifunc. */
37244 dispatch_decl
= make_dispatcher_decl (default_node
->decl
);
37246 dispatcher_node
= cgraph_node::get_create (dispatch_decl
);
37247 gcc_assert (dispatcher_node
!= NULL
);
37248 dispatcher_node
->dispatcher_function
= 1;
37249 dispatcher_version_info
37250 = dispatcher_node
->insert_new_function_version ();
37251 dispatcher_version_info
->next
= default_version_info
;
37252 dispatcher_node
->definition
= 1;
37254 /* Set the dispatcher for all the versions. */
37255 it_v
= default_version_info
;
37256 while (it_v
!= NULL
)
37258 it_v
->dispatcher_resolver
= dispatch_decl
;
37264 error_at (DECL_SOURCE_LOCATION (default_node
->decl
),
37265 "multiversioning needs ifunc which is not supported "
37270 return dispatch_decl
;
37273 /* Make the resolver function decl to dispatch the versions of a multi-
37274 versioned function, DEFAULT_DECL. Create an empty basic block in the
37275 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37279 make_resolver_func (const tree default_decl
,
37280 const tree dispatch_decl
,
37281 basic_block
*empty_bb
)
37283 /* Make the resolver function static. The resolver function returns
37285 tree decl_name
= clone_function_name_numbered (default_decl
, "resolver");
37286 const char *resolver_name
= IDENTIFIER_POINTER (decl_name
);
37287 tree type
= build_function_type_list (ptr_type_node
, NULL_TREE
);
37288 tree decl
= build_fn_decl (resolver_name
, type
);
37289 SET_DECL_ASSEMBLER_NAME (decl
, decl_name
);
37291 DECL_NAME (decl
) = decl_name
;
37292 TREE_USED (decl
) = 1;
37293 DECL_ARTIFICIAL (decl
) = 1;
37294 DECL_IGNORED_P (decl
) = 0;
37295 TREE_PUBLIC (decl
) = 0;
37296 DECL_UNINLINABLE (decl
) = 1;
37298 /* Resolver is not external, body is generated. */
37299 DECL_EXTERNAL (decl
) = 0;
37300 DECL_EXTERNAL (dispatch_decl
) = 0;
37302 DECL_CONTEXT (decl
) = NULL_TREE
;
37303 DECL_INITIAL (decl
) = make_node (BLOCK
);
37304 DECL_STATIC_CONSTRUCTOR (decl
) = 0;
37306 /* Build result decl and add to function_decl. */
37307 tree t
= build_decl (UNKNOWN_LOCATION
, RESULT_DECL
, NULL_TREE
, ptr_type_node
);
37308 DECL_ARTIFICIAL (t
) = 1;
37309 DECL_IGNORED_P (t
) = 1;
37310 DECL_RESULT (decl
) = t
;
37312 gimplify_function_tree (decl
);
37313 push_cfun (DECL_STRUCT_FUNCTION (decl
));
37314 *empty_bb
= init_lowered_empty_function (decl
, false,
37315 profile_count::uninitialized ());
37317 cgraph_node::add_new_function (decl
, true);
37318 symtab
->call_cgraph_insertion_hooks (cgraph_node::get_create (decl
));
37322 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37323 DECL_ATTRIBUTES (dispatch_decl
)
37324 = make_attribute ("ifunc", resolver_name
, DECL_ATTRIBUTES (dispatch_decl
));
37326 cgraph_node::create_same_body_alias (dispatch_decl
, decl
);
37331 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37332 return a pointer to VERSION_DECL if we are running on a machine that
37333 supports the index CLONE_ISA hardware architecture bits. This function will
37334 be called during version dispatch to decide which function version to
37335 execute. It returns the basic block at the end, to which more conditions
37339 add_condition_to_bb (tree function_decl
, tree version_decl
,
37340 int clone_isa
, basic_block new_bb
)
37342 push_cfun (DECL_STRUCT_FUNCTION (function_decl
));
37344 gcc_assert (new_bb
!= NULL
);
37345 gimple_seq gseq
= bb_seq (new_bb
);
37348 tree convert_expr
= build1 (CONVERT_EXPR
, ptr_type_node
,
37349 build_fold_addr_expr (version_decl
));
37350 tree result_var
= create_tmp_var (ptr_type_node
);
37351 gimple
*convert_stmt
= gimple_build_assign (result_var
, convert_expr
);
37352 gimple
*return_stmt
= gimple_build_return (result_var
);
37354 if (clone_isa
== CLONE_DEFAULT
)
37356 gimple_seq_add_stmt (&gseq
, convert_stmt
);
37357 gimple_seq_add_stmt (&gseq
, return_stmt
);
37358 set_bb_seq (new_bb
, gseq
);
37359 gimple_set_bb (convert_stmt
, new_bb
);
37360 gimple_set_bb (return_stmt
, new_bb
);
37365 tree bool_zero
= build_int_cst (bool_int_type_node
, 0);
37366 tree cond_var
= create_tmp_var (bool_int_type_node
);
37367 tree predicate_decl
= rs6000_builtin_decls
[(int) RS6000_BUILTIN_CPU_SUPPORTS
];
37368 const char *arg_str
= rs6000_clone_map
[clone_isa
].name
;
37369 tree predicate_arg
= build_string_literal (strlen (arg_str
) + 1, arg_str
);
37370 gimple
*call_cond_stmt
= gimple_build_call (predicate_decl
, 1, predicate_arg
);
37371 gimple_call_set_lhs (call_cond_stmt
, cond_var
);
37373 gimple_set_block (call_cond_stmt
, DECL_INITIAL (function_decl
));
37374 gimple_set_bb (call_cond_stmt
, new_bb
);
37375 gimple_seq_add_stmt (&gseq
, call_cond_stmt
);
37377 gimple
*if_else_stmt
= gimple_build_cond (NE_EXPR
, cond_var
, bool_zero
,
37378 NULL_TREE
, NULL_TREE
);
37379 gimple_set_block (if_else_stmt
, DECL_INITIAL (function_decl
));
37380 gimple_set_bb (if_else_stmt
, new_bb
);
37381 gimple_seq_add_stmt (&gseq
, if_else_stmt
);
37383 gimple_seq_add_stmt (&gseq
, convert_stmt
);
37384 gimple_seq_add_stmt (&gseq
, return_stmt
);
37385 set_bb_seq (new_bb
, gseq
);
37387 basic_block bb1
= new_bb
;
37388 edge e12
= split_block (bb1
, if_else_stmt
);
37389 basic_block bb2
= e12
->dest
;
37390 e12
->flags
&= ~EDGE_FALLTHRU
;
37391 e12
->flags
|= EDGE_TRUE_VALUE
;
37393 edge e23
= split_block (bb2
, return_stmt
);
37394 gimple_set_bb (convert_stmt
, bb2
);
37395 gimple_set_bb (return_stmt
, bb2
);
37397 basic_block bb3
= e23
->dest
;
37398 make_edge (bb1
, bb3
, EDGE_FALSE_VALUE
);
37401 make_edge (bb2
, EXIT_BLOCK_PTR_FOR_FN (cfun
), 0);
37407 /* This function generates the dispatch function for multi-versioned functions.
37408 DISPATCH_DECL is the function which will contain the dispatch logic.
37409 FNDECLS are the function choices for dispatch, and is a tree chain.
37410 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37411 code is generated. */
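
/* Note (added for illustration; not part of the original source): the loop
   below walks the clones from CLONE_MAX - 1 down to the default entry, so the
   dispatcher tests the highest-priority ISA first and falls back to the
   default version last.  */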
37414 dispatch_function_versions (tree dispatch_decl
,
37416 basic_block
*empty_bb
)
37420 vec
<tree
> *fndecls
;
37421 tree clones
[CLONE_MAX
];
37423 if (TARGET_DEBUG_TARGET
)
37424 fputs ("dispatch_function_versions, top\n", stderr
);
37426 gcc_assert (dispatch_decl
!= NULL
37427 && fndecls_p
!= NULL
37428 && empty_bb
!= NULL
);
37430 /* fndecls_p is actually a vector. */
37431 fndecls
= static_cast<vec
<tree
> *> (fndecls_p
);
37433 /* At least one more version other than the default. */
37434 gcc_assert (fndecls
->length () >= 2);
37436 /* The first version in the vector is the default decl. */
37437 memset ((void *) clones
, '\0', sizeof (clones
));
37438 clones
[CLONE_DEFAULT
] = (*fndecls
)[0];
37440 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37441 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37442 __builtin_cpu_support ensures that the TOC fields are setup by requiring a
37443 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37444 to insert the code here to do the call. */
37446 for (ix
= 1; fndecls
->iterate (ix
, &ele
); ++ix
)
37448 int priority
= rs6000_clone_priority (ele
);
37449 if (!clones
[priority
])
37450 clones
[priority
] = ele
;
37453 for (ix
= CLONE_MAX
- 1; ix
>= 0; ix
--)
37456 if (TARGET_DEBUG_TARGET
)
37457 fprintf (stderr
, "dispatch_function_versions, clone %d, %s\n",
37458 ix
, get_decl_name (clones
[ix
]));
37460 *empty_bb
= add_condition_to_bb (dispatch_decl
, clones
[ix
], ix
,
37467 /* Generate the dispatching code body to dispatch multi-versioned function
37468 DECL. The target hook is called to process the "target" attributes and
37469 provide the code to dispatch the right function at run-time. NODE points
37470 to the dispatcher decl whose body will be created. */
37473 rs6000_generate_version_dispatcher_body (void *node_p
)
37476 basic_block empty_bb
;
37477 struct cgraph_node
*node
= (cgraph_node
*) node_p
;
37478 struct cgraph_function_version_info
*ninfo
= node
->function_version ();
37480 if (ninfo
->dispatcher_resolver
)
37481 return ninfo
->dispatcher_resolver
;
37483 /* node is going to be an alias, so remove the finalized bit. */
37484 node
->definition
= false;
37486 /* The first version in the chain corresponds to the default version. */
37487 ninfo
->dispatcher_resolver
= resolver
37488 = make_resolver_func (ninfo
->next
->this_node
->decl
, node
->decl
, &empty_bb
);
37490 if (TARGET_DEBUG_TARGET
)
37491 fprintf (stderr
, "rs6000_get_function_versions_dispatcher, %s\n",
37492 get_decl_name (resolver
));
37494 push_cfun (DECL_STRUCT_FUNCTION (resolver
));
37495 auto_vec
<tree
, 2> fn_ver_vec
;
37497 for (struct cgraph_function_version_info
*vinfo
= ninfo
->next
;
37499 vinfo
= vinfo
->next
)
37501 struct cgraph_node
*version
= vinfo
->this_node
;
37502 /* Check for virtual functions here again, as by this time it should
37503 have been determined if this function needs a vtable index or
37504 not. This happens for methods in derived classes that override
37505 virtual methods in base classes but are not explicitly marked as
37507 if (DECL_VINDEX (version
->decl
))
37508 sorry ("Virtual function multiversioning not supported");
37510 fn_ver_vec
.safe_push (version
->decl
);
37513 dispatch_function_versions (resolver
, &fn_ver_vec
, &empty_bb
);
37514 cgraph_edge::rebuild_edges ();
37520 /* Hook to determine if one function can safely inline another. */
37523 rs6000_can_inline_p (tree caller
, tree callee
)
37526 tree caller_tree
= DECL_FUNCTION_SPECIFIC_TARGET (caller
);
37527 tree callee_tree
= DECL_FUNCTION_SPECIFIC_TARGET (callee
);
37529 /* If callee has no option attributes, then it is ok to inline. */
37533 /* If caller has no option attributes, but callee does then it is not ok to
37535 else if (!caller_tree
)
37540 struct cl_target_option
*caller_opts
= TREE_TARGET_OPTION (caller_tree
);
37541 struct cl_target_option
*callee_opts
= TREE_TARGET_OPTION (callee_tree
);
37543 /* Callee's options should a subset of the caller's, i.e. a vsx function
37544 can inline an altivec function but a non-vsx function can't inline a
37546 if ((caller_opts
->x_rs6000_isa_flags
& callee_opts
->x_rs6000_isa_flags
)
37547 == callee_opts
->x_rs6000_isa_flags
)
37551 if (TARGET_DEBUG_TARGET
)
37552 fprintf (stderr
, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37553 get_decl_name (caller
), get_decl_name (callee
),
37554 (ret
? "can" : "cannot"));
/* Allocate a stack temp and fix up the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */
37563 rs6000_allocate_stack_temp (machine_mode mode
,
37564 bool offsettable_p
,
37567 rtx stack
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
37568 rtx addr
= XEXP (stack
, 0);
37569 int strict_p
= reload_completed
;
37571 if (!legitimate_indirect_address_p (addr
, strict_p
))
37574 && !rs6000_legitimate_offset_address_p (mode
, addr
, strict_p
, true))
37575 stack
= replace_equiv_address (stack
, copy_addr_to_reg (addr
));
37577 else if (reg_reg_p
&& !legitimate_indexed_address_p (addr
, strict_p
))
37578 stack
= replace_equiv_address (stack
, copy_addr_to_reg (addr
));
37584 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37585 convert to such a form to deal with memory reference instructions
37586 like STFIWX and LDBRX that only take reg+reg addressing. */
37589 rs6000_force_indexed_or_indirect_mem (rtx x
)
37591 machine_mode mode
= GET_MODE (x
);
37593 gcc_assert (MEM_P (x
));
37594 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x
, mode
))
37596 rtx addr
= XEXP (x
, 0);
37597 if (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
37599 rtx reg
= XEXP (addr
, 0);
37600 HOST_WIDE_INT size
= GET_MODE_SIZE (GET_MODE (x
));
37601 rtx size_rtx
= GEN_INT ((GET_CODE (addr
) == PRE_DEC
) ? -size
: size
);
37602 gcc_assert (REG_P (reg
));
37603 emit_insn (gen_add3_insn (reg
, reg
, size_rtx
));
37606 else if (GET_CODE (addr
) == PRE_MODIFY
)
37608 rtx reg
= XEXP (addr
, 0);
37609 rtx expr
= XEXP (addr
, 1);
37610 gcc_assert (REG_P (reg
));
37611 gcc_assert (GET_CODE (expr
) == PLUS
);
37612 emit_insn (gen_add3_insn (reg
, XEXP (expr
, 0), XEXP (expr
, 1)));
37616 x
= replace_equiv_address (x
, force_reg (Pmode
, addr
));
37622 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37624 On the RS/6000, all integer constants are acceptable, most won't be valid
37625 for particular insns, though. Only easy FP constants are acceptable. */
37628 rs6000_legitimate_constant_p (machine_mode mode
, rtx x
)
37630 if (TARGET_ELF
&& tls_referenced_p (x
))
37633 if (CONST_DOUBLE_P (x
))
37634 return easy_fp_constant (x
, mode
);
37636 if (GET_CODE (x
) == CONST_VECTOR
)
37637 return easy_vector_constant (x
, mode
);
37643 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37646 chain_already_loaded (rtx_insn
*last
)
37648 for (; last
!= NULL
; last
= PREV_INSN (last
))
37650 if (NONJUMP_INSN_P (last
))
37652 rtx patt
= PATTERN (last
);
37654 if (GET_CODE (patt
) == SET
)
37656 rtx lhs
= XEXP (patt
, 0);
37658 if (REG_P (lhs
) && REGNO (lhs
) == STATIC_CHAIN_REGNUM
)
37666 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37669 rs6000_call_aix (rtx value
, rtx func_desc
, rtx tlsarg
, rtx cookie
)
37671 rtx func
= func_desc
;
37672 rtx toc_reg
= gen_rtx_REG (Pmode
, TOC_REGNUM
);
37673 rtx toc_load
= NULL_RTX
;
37674 rtx toc_restore
= NULL_RTX
;
37676 rtx abi_reg
= NULL_RTX
;
37682 tlsarg
= global_tlsarg
;
37684 /* Handle longcall attributes. */
37685 if ((INTVAL (cookie
) & CALL_LONG
) != 0
37686 && GET_CODE (func_desc
) == SYMBOL_REF
)
37687 func
= rs6000_longcall_ref (func_desc
, tlsarg
);
37689 /* Handle indirect calls. */
37690 if (GET_CODE (func
) != SYMBOL_REF
37691 || (DEFAULT_ABI
== ABI_AIX
&& !SYMBOL_REF_FUNCTION_P (func
)))
37693 /* Save the TOC into its reserved slot before the call,
37694 and prepare to restore it after the call. */
37695 rtx stack_toc_offset
= GEN_INT (RS6000_TOC_SAVE_SLOT
);
37696 rtx stack_toc_unspec
= gen_rtx_UNSPEC (Pmode
,
37697 gen_rtvec (1, stack_toc_offset
),
37699 toc_restore
= gen_rtx_SET (toc_reg
, stack_toc_unspec
);
37701 /* Can we optimize saving the TOC in the prologue or
37702 do we need to do it at every call? */
37703 if (TARGET_SAVE_TOC_INDIRECT
&& !cfun
->calls_alloca
)
37704 cfun
->machine
->save_toc_in_prologue
= true;
37707 rtx stack_ptr
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
37708 rtx stack_toc_mem
= gen_frame_mem (Pmode
,
37709 gen_rtx_PLUS (Pmode
, stack_ptr
,
37710 stack_toc_offset
));
37711 MEM_VOLATILE_P (stack_toc_mem
) = 1;
37713 && TARGET_TLS_MARKERS
37714 && DEFAULT_ABI
== ABI_ELFv2
37715 && GET_CODE (func_desc
) == SYMBOL_REF
)
37717 rtvec v
= gen_rtvec (3, toc_reg
, func_desc
, tlsarg
);
37718 rtx mark_toc_reg
= gen_rtx_UNSPEC (Pmode
, v
, UNSPEC_PLTSEQ
);
37719 emit_insn (gen_rtx_SET (stack_toc_mem
, mark_toc_reg
));
37722 emit_move_insn (stack_toc_mem
, toc_reg
);
37725 if (DEFAULT_ABI
== ABI_ELFv2
)
37727 /* A function pointer in the ELFv2 ABI is just a plain address, but
37728 the ABI requires it to be loaded into r12 before the call. */
37729 func_addr
= gen_rtx_REG (Pmode
, 12);
37730 if (!rtx_equal_p (func_addr
, func
))
37731 emit_move_insn (func_addr
, func
);
37732 abi_reg
= func_addr
;
37733 /* Indirect calls via CTR are strongly preferred over indirect
37734 calls via LR, so move the address there. Needed to mark
37735 this insn for linker plt sequence editing too. */
37736 func_addr
= gen_rtx_REG (Pmode
, CTR_REGNO
);
37738 && TARGET_TLS_MARKERS
37739 && GET_CODE (func_desc
) == SYMBOL_REF
)
37741 rtvec v
= gen_rtvec (3, abi_reg
, func_desc
, tlsarg
);
37742 rtx mark_func
= gen_rtx_UNSPEC (Pmode
, v
, UNSPEC_PLTSEQ
);
37743 emit_insn (gen_rtx_SET (func_addr
, mark_func
));
37744 v
= gen_rtvec (2, func_addr
, func_desc
);
37745 func_addr
= gen_rtx_UNSPEC (Pmode
, v
, UNSPEC_PLTSEQ
);
37748 emit_move_insn (func_addr
, abi_reg
);
      /* A function pointer under AIX is a pointer to a data area whose
         first word contains the actual address of the function, whose
         second word contains a pointer to its TOC, and whose third word
         contains a value to place in the static chain register (r11).
         Note that if we load the static chain, our "trampoline" need
         not have any executable code.  */
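
      /* Illustrative layout only (added; not part of the original source):
         the descriptor described above can be pictured as

            struct aix_func_desc
            {
              void *entry_point;   -- word 0: actual code address
              void *toc;           -- word 1: callee's TOC pointer
              void *static_chain;  -- word 2: value for r11
            };

         where each word is Pmode-sized, matching the GET_MODE_SIZE (Pmode)
         offsets used when loading the TOC and static chain below.  */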
37759 /* Load up address of the actual function. */
37760 func
= force_reg (Pmode
, func
);
37761 func_addr
= gen_reg_rtx (Pmode
);
37762 emit_move_insn (func_addr
, gen_rtx_MEM (Pmode
, func
));
37764 /* Indirect calls via CTR are strongly preferred over indirect
37765 calls via LR, so move the address there. */
37766 rtx ctr_reg
= gen_rtx_REG (Pmode
, CTR_REGNO
);
37767 emit_move_insn (ctr_reg
, func_addr
);
37768 func_addr
= ctr_reg
;
37770 /* Prepare to load the TOC of the called function. Note that the
37771 TOC load must happen immediately before the actual call so
37772 that unwinding the TOC registers works correctly. See the
37773 comment in frob_update_context. */
37774 rtx func_toc_offset
= GEN_INT (GET_MODE_SIZE (Pmode
));
37775 rtx func_toc_mem
= gen_rtx_MEM (Pmode
,
37776 gen_rtx_PLUS (Pmode
, func
,
37778 toc_load
= gen_rtx_USE (VOIDmode
, func_toc_mem
);
37780 /* If we have a static chain, load it up. But, if the call was
37781 originally direct, the 3rd word has not been written since no
37782 trampoline has been built, so we ought not to load it, lest we
37783 override a static chain value. */
37784 if (!(GET_CODE (func_desc
) == SYMBOL_REF
37785 && SYMBOL_REF_FUNCTION_P (func_desc
))
37786 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37787 && !chain_already_loaded (get_current_sequence ()->next
->last
))
37789 rtx sc_reg
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
37790 rtx func_sc_offset
= GEN_INT (2 * GET_MODE_SIZE (Pmode
));
37791 rtx func_sc_mem
= gen_rtx_MEM (Pmode
,
37792 gen_rtx_PLUS (Pmode
, func
,
37794 emit_move_insn (sc_reg
, func_sc_mem
);
37801 /* Direct calls use the TOC: for local calls, the callee will
37802 assume the TOC register is set; for non-local calls, the
37803 PLT stub needs the TOC register. */
37808 /* Create the call. */
37809 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_addr
), tlsarg
);
37810 if (value
!= NULL_RTX
)
37811 call
[0] = gen_rtx_SET (value
, call
[0]);
37815 call
[n_call
++] = toc_load
;
37817 call
[n_call
++] = toc_restore
;
37819 call
[n_call
++] = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
37821 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (n_call
, call
));
37822 insn
= emit_call_insn (insn
);
37824 /* Mention all registers defined by the ABI to hold information
37825 as uses in CALL_INSN_FUNCTION_USAGE. */
37827 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), abi_reg
);
37830 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37833 rs6000_sibcall_aix (rtx value
, rtx func_desc
, rtx tlsarg
, rtx cookie
)
37838 gcc_assert (INTVAL (cookie
) == 0);
37841 tlsarg
= global_tlsarg
;
37843 /* Create the call. */
37844 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_desc
), tlsarg
);
37845 if (value
!= NULL_RTX
)
37846 call
[0] = gen_rtx_SET (value
, call
[0]);
37848 call
[1] = simple_return_rtx
;
37850 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (2, call
));
37851 insn
= emit_call_insn (insn
);
37853 /* Note use of the TOC register. */
37854 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), gen_rtx_REG (Pmode
, TOC_REGNUM
));
37857 /* Expand code to perform a call under the SYSV4 ABI. */
37860 rs6000_call_sysv (rtx value
, rtx func_desc
, rtx tlsarg
, rtx cookie
)
37862 rtx func
= func_desc
;
37866 rtx abi_reg
= NULL_RTX
;
37869 tlsarg
= global_tlsarg
;
37871 /* Handle longcall attributes. */
37872 if ((INTVAL (cookie
) & CALL_LONG
) != 0
37873 && GET_CODE (func_desc
) == SYMBOL_REF
)
37875 func
= rs6000_longcall_ref (func_desc
, tlsarg
);
37876 /* If the longcall was implemented using PLT16 relocs, then r11
37877 needs to be valid at the call for lazy linking. */
37879 && TARGET_TLS_MARKERS
)
37883 /* Handle indirect calls. */
37884 if (GET_CODE (func
) != SYMBOL_REF
)
37886 func
= force_reg (Pmode
, func
);
37888 /* Indirect calls via CTR are strongly preferred over indirect
37889 calls via LR, so move the address there. Needed to mark
37890 this insn for linker plt sequence editing too. */
37891 func_addr
= gen_rtx_REG (Pmode
, CTR_REGNO
);
37893 && TARGET_TLS_MARKERS
37894 && GET_CODE (func_desc
) == SYMBOL_REF
)
37896 rtvec v
= gen_rtvec (3, func
, func_desc
, tlsarg
);
37897 rtx mark_func
= gen_rtx_UNSPEC (Pmode
, v
, UNSPEC_PLTSEQ
);
37898 emit_insn (gen_rtx_SET (func_addr
, mark_func
));
37899 v
= gen_rtvec (2, func_addr
, func_desc
);
37900 func_addr
= gen_rtx_UNSPEC (Pmode
, v
, UNSPEC_PLTSEQ
);
37903 emit_move_insn (func_addr
, func
);
37908 /* Create the call. */
37909 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_addr
), tlsarg
);
37910 if (value
!= NULL_RTX
)
37911 call
[0] = gen_rtx_SET (value
, call
[0]);
37913 unsigned int mask
= CALL_V4_SET_FP_ARGS
| CALL_V4_CLEAR_FP_ARGS
;
37914 call
[1] = gen_rtx_USE (VOIDmode
, GEN_INT (INTVAL (cookie
) & mask
));
37916 call
[2] = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
37918 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (3, call
));
37919 insn
= emit_call_insn (insn
);
37921 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), abi_reg
);

/* Expand code to perform a sibling call under the SysV4 ABI.  */

void
rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
{
  rtx func = func_desc;
  rtx func_addr;
  rtx call[3];
  rtx insn;
  rtx abi_reg = NULL_RTX;

  if (global_tlsarg)
    tlsarg = global_tlsarg;

  /* Handle longcall attributes.  */
  if ((INTVAL (cookie) & CALL_LONG) != 0
      && GET_CODE (func_desc) == SYMBOL_REF)
    {
      func = rs6000_longcall_ref (func_desc, tlsarg);
      /* If the longcall was implemented using PLT16 relocs, then r11
	 needs to be valid at the call for lazy linking.  */
      if (HAVE_AS_PLTSEQ
	  && TARGET_TLS_MARKERS)
	abi_reg = func;
    }

  /* Handle indirect calls.  */
  if (GET_CODE (func) != SYMBOL_REF)
    {
      func = force_reg (Pmode, func);

      /* Indirect sibcalls must go via CTR.  Needed to mark
	 this insn for linker plt sequence editing too.  */
      func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
      if (HAVE_AS_PLTSEQ
	  && TARGET_TLS_MARKERS
	  && GET_CODE (func_desc) == SYMBOL_REF)
	{
	  rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
	  rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
	  emit_insn (gen_rtx_SET (func_addr, mark_func));
	  v = gen_rtvec (2, func_addr, func_desc);
	  func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
	}
      else
	emit_move_insn (func_addr, func);
    }
  else
    func_addr = func;

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  unsigned int mask = CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS;
  call[1] = gen_rtx_USE (VOIDmode, GEN_INT (INTVAL (cookie) & mask));

  call[2] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
  insn = emit_call_insn (insn);
  if (abi_reg)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
}

/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}

/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

#if RS6000_WEAK
  if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
    {
      cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
#endif
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  allocate_struct_function (decl, false);
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}

/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);

  /* Make sure the hard reg set doesn't include r2, which was possibly added
     via PIC_OFFSET_TABLE_REGNUM.  */
  if (TARGET_TOC)
    remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
}

/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_inner (rtx dest,
			    rtx op1,
			    rtx op2,
			    enum rtx_code code,
			    machine_mode mode,
			    bool complement_final_p,
			    bool complement_op1_p,
			    bool complement_op2_p)
{
  rtx bool_rtx;

  /* Optimize AND of 0/0xffffffff and IOR/XOR of 0.  */
  if (op2 && GET_CODE (op2) == CONST_INT
      && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      HOST_WIDE_INT value = INTVAL (op2) & mask;

      /* Optimize AND of 0 to just set 0.  Optimize AND of -1 to be a move.  */
      if (code == AND)
	{
	  if (value == 0)
	    {
	      emit_insn (gen_rtx_SET (dest, const0_rtx));
	      return;
	    }

	  else if (value == mask)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}

      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
      else if (code == IOR || code == XOR)
	{
	  if (value == 0)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}
    }

  if (code == AND && mode == SImode
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      emit_insn (gen_andsi3 (dest, op1, op2));
      return;
    }

  if (complement_op1_p)
    op1 = gen_rtx_NOT (mode, op1);

  if (complement_op2_p)
    op2 = gen_rtx_NOT (mode, op2);

  /* For canonical RTL, if only one arm is inverted it is the first.  */
  if (!complement_op1_p && complement_op2_p)
    std::swap (op1, op2);

  bool_rtx = ((code == NOT)
	      ? gen_rtx_NOT (mode, op1)
	      : gen_rtx_fmt_ee (code, mode, op1, op2));

  if (complement_final_p)
    bool_rtx = gen_rtx_NOT (mode, bool_rtx);

  emit_insn (gen_rtx_SET (dest, bool_rtx));
}
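
/* For example, with CODE == AND and a constant op2 of 0 the helper above emits
   just (set dest (const_int 0)), and with op2 equal to the mode mask (-1) it
   emits a plain register move; only the remaining cases build the full
   AND/IOR/XOR rtx, optionally wrapped in NOT.  */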

/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_di (rtx operands[3],
			 enum rtx_code code,
			 bool complement_final_p,
			 bool complement_op1_p,
			 bool complement_op2_p)
{
  const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
  const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
  const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
  enum hi_lo { hi = 0, lo = 1 };
  rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
  size_t i;

  op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
  op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
  op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
  op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);

  if (code == NOT)
    op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
  else
    {
      if (GET_CODE (operands[2]) != CONST_INT)
	{
	  op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
	  op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
	}
      else
	{
	  HOST_WIDE_INT value = INTVAL (operands[2]);
	  HOST_WIDE_INT value_hi_lo[2];

	  gcc_assert (!complement_final_p);
	  gcc_assert (!complement_op1_p);
	  gcc_assert (!complement_op2_p);

	  value_hi_lo[hi] = value >> 32;
	  value_hi_lo[lo] = value & lower_32bits;

	  for (i = 0; i < 2; i++)
	    {
	      HOST_WIDE_INT sub_value = value_hi_lo[i];

	      if (sub_value & sign_bit)
		sub_value |= upper_32bits;

	      op2_hi_lo[i] = GEN_INT (sub_value);

	      /* If this is an AND instruction, check to see if we need to load
		 the value in a register.  */
	      if (code == AND && sub_value != -1 && sub_value != 0
		  && !and_operand (op2_hi_lo[i], SImode))
		op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
	    }
	}
    }

  for (i = 0; i < 2; i++)
    {
      /* Split large IOR/XOR operations.  */
      if ((code == IOR || code == XOR)
	  && GET_CODE (op2_hi_lo[i]) == CONST_INT
	  && !complement_final_p
	  && !complement_op1_p
	  && !complement_op2_p
	  && !logical_const_operand (op2_hi_lo[i], SImode))
	{
	  HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
	  HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
	  HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
	  rtx tmp = gen_reg_rtx (SImode);

	  /* Make sure the constant is sign extended.  */
	  if ((hi_16bits & sign_bit) != 0)
	    hi_16bits |= upper_32bits;

	  rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
				      code, SImode, false, false, false);

	  rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
				      code, SImode, false, false, false);
	}
      else
	rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
				    code, SImode, complement_final_p,
				    complement_op1_p, complement_op2_p);
    }

  return;
}
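
/* Worked example (illustrative): on a 32-bit target, splitting
   (ior:DI r3 0x12345678) yields an SImode IOR of 0 for the high word, which
   degenerates into a move (or nothing if the registers match), and an SImode
   IOR of 0x12345678 for the low word; that constant is not a 16-bit logical
   constant, so it is further split into an ORIS of the 0x1234 upper half
   followed by an ORI of the 0x5678 lower half.  */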

/* Split the insns that make up boolean operations operating on multiple GPR
   registers.  The boolean MD patterns ensure that the inputs either are
   exactly the same as the output registers, or there is no overlap.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

void
rs6000_split_logical (rtx operands[3],
		      enum rtx_code code,
		      bool complement_final_p,
		      bool complement_op1_p,
		      bool complement_op2_p)
{
  machine_mode mode = GET_MODE (operands[0]);
  machine_mode sub_mode;
  rtx op0, op1, op2;
  int sub_size, regno0, regno1, nregs, i;

  /* If this is DImode, use the specialized version that can run before
     register allocation.  */
  if (mode == DImode && !TARGET_POWERPC64)
    {
      rs6000_split_logical_di (operands, code, complement_final_p,
			       complement_op1_p, complement_op2_p);
      return;
    }

  op0 = operands[0];
  op1 = operands[1];
  op2 = (code == NOT) ? NULL_RTX : operands[2];
  sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
  sub_size = GET_MODE_SIZE (sub_mode);
  regno0 = REGNO (op0);
  regno1 = REGNO (op1);

  gcc_assert (reload_completed);
  gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
  gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));

  nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
  gcc_assert (nregs > 1);

  if (op2 && REG_P (op2))
    gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));

  for (i = 0; i < nregs; i++)
    {
      int offset = i * sub_size;
      rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
      rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
      rtx sub_op2 = ((code == NOT)
		     ? NULL_RTX
		     : simplify_subreg (sub_mode, op2, mode, offset));

      rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
				  complement_final_p, complement_op1_p,
				  complement_op2_p);
    }

  return;
}
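
/* For instance, a TImode AND on a 64-bit target is decomposed by the loop
   above into two DImode ANDs, one for each constituent GPR subreg.  */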

/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together on
   a power8.  */

bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!base_reg_operand (target, GET_MODE (target)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Allow sign/zero extension.  */
  if (GET_CODE (mem) == ZERO_EXTEND
      || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
    mem = XEXP (mem, 0);

  if (!MEM_P (mem))
    return false;

  if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
    return false;

  /* Validate that the register used to load the high value is either the
     register being loaded, or we can safely replace its use.

     This function is only called from the peephole2 pass and we assume that
     there are 2 instructions in the peephole (addis and load), so we want to
     check if the target register was not used in the memory address and the
     register to hold the addis result is dead after the peephole.  */
  if (REGNO (addis_reg) != REGNO (target))
    {
      if (reg_mentioned_p (target, mem))
	return false;

      if (!peep2_reg_dead_p (2, addis_reg))
	return false;

      /* If the target register being loaded is the stack pointer, we must
	 avoid loading any other value into it, even temporarily.  */
      if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
	return false;
    }

  base_reg = XEXP (addr, 0);
  return REGNO (addis_reg) == REGNO (base_reg);
}
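
/* Schematically, the two-insn peephole this predicate guards looks like

     (set (reg rA) (high ...))                   ;; e.g. addis rA,r2,sym@ha
     (set (reg rT) (mem (lo_sum (reg rA) ...)))  ;; e.g. lwz rT,sym@l(rA)

   and the checks above make sure rA can be replaced by rT so the two
   instructions can be fused (an illustrative shape, not the only one).  */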

/* During the peephole2 pass, adjust and expand the insns for a load fusion
   sequence.  We adjust the addis register to use the target register.  If the
   load sign extends, we adjust the code to do the zero extending load, and an
   explicit sign extension later since the fusion only covers zero extending
   loads.

   The operands are:
	operands[0]	register set with addis (to be replaced with target)
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].  */

void
expand_fusion_gpr_load (rtx *operands)
{
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == ZERO_EXTEND
      || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_GPR);
  emit_insn (gen_rtx_SET (target, new_mem));

  if (extend == SIGN_EXTEND)
    {
      int sub_off = ((BYTES_BIG_ENDIAN)
		     ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
		     : 0);
      rtx sign_reg
	= simplify_subreg (target_mode, target, extend_mode, sub_off);

      emit_insn (gen_rtx_SET (target,
			      gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
    }

  return;
}
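
/* Sketch of the effect: a fused sign-extending load is emitted as a
   zero-extending UNSPEC_FUSION_GPR load of the target register followed by a
   separate (set target (sign_extend (subreg target))), since only the
   zero-extending form takes part in the fusion.  */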

/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value)
{
  rtx fuse_ops[10];
  const char *addis_str = NULL;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
	  && satisfies_constraint_L (op1))
	{
	  fuse_ops[1] = op0;
	  fuse_ops[2] = op1;
	  addis_str = "addis %0,%1,%v2";
	}
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
	{
	  fuse_ops[1] = XVECEXP (value, 0, 0);		/* symbol ref.  */
	  fuse_ops[2] = XVECEXP (value, 0, 1);		/* TOC register.  */
	  if (TARGET_ELF)
	    addis_str = "addis %0,%2,%1@toc@ha";

	  else if (TARGET_XCOFF)
	    addis_str = "addis %0,%1@u(%2)";

	  else
	    gcc_unreachable ();
	}

      else if (GET_CODE (value) == PLUS)
	{
	  rtx op0 = XEXP (value, 0);
	  rtx op1 = XEXP (value, 1);

	  if (GET_CODE (op0) == UNSPEC
	      && XINT (op0, 1) == UNSPEC_TOCREL
	      && CONST_INT_P (op1))
	    {
	      fuse_ops[1] = XVECEXP (op0, 0, 0);	/* symbol ref.  */
	      fuse_ops[2] = XVECEXP (op0, 0, 1);	/* TOC register.  */
	      fuse_ops[3] = op1;
	      if (TARGET_ELF)
		addis_str = "addis %0,%2,%1+%3@toc@ha";

	      else if (TARGET_XCOFF)
		addis_str = "addis %0,%1+%3@u(%2)";

	      else
		gcc_unreachable ();
	    }
	}

      else if (satisfies_constraint_L (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%v1";
	}

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
	{
	  fuse_ops[1] = value;
	  addis_str = "lis %0,%1@ha";
	}
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  output_asm_insn (addis_str, fuse_ops);
}

/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

static void
emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
	   && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
	   && GET_CODE (XEXP (offset, 0)) == UNSPEC
	   && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
	   && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
	sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
	sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
	gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}

/* Given an address, convert it into the addis and load offset parts.  Addresses
   created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}

/* Return a string to fuse an addis instruction with a gpr load to the same
   register that we loaded up the addis instruction.  The address that is used
   is the logical address that was formed during peephole2:
	(lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      load_str = "lbz";
      break;

    case E_HImode:
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value);

  /* Emit the D-form load instruction.  */
  emit_fusion_load (target, target, load_offset, load_str);

  return "";
}
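
/* Illustrative output, assuming an SImode TOC-relative load fused into r9:

     addis 9,2,.LC0@toc@ha
     lwz 9,.LC0@toc@l(9)

   The exact relocation operators follow the templates chosen in
   emit_fusion_addis and emit_fusion_load above.  */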

#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)&fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
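
/* These three expression trees are what the middle end wraps around an atomic
   compound assignment on a floating-point type: *hold runs before the
   operation to save the FP environment, *clear is used around the retry of a
   failed compare-and-swap, and *update restores the saved environment and
   re-raises any newly set exceptions; see the TARGET_ATOMIC_ASSIGN_EXPAND_FENV
   documentation for the exact contract.  */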

void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
					   GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
					   GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
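
/* Net effect, written element-wise: dst = { (float) src1[0], (float) src1[1],
   (float) src2[0], (float) src2[1] }; the xxpermdi/vmrgew pairing only exists
   to produce that element order on both endiannesses.  */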

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
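
/* Analogous to the float2 expansions above: the two V2DF inputs are converted
   with xvcvdpsxws/xvcvdpuxws and the even words merged, so dst ends up holding
   the four 32-bit integer conversions of src1 and src2.  */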

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}

/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}
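
/* For example, a narrow string constant that would otherwise get the default
   8-bit alignment is bumped here to BITS_PER_WORD (64 on a 64-bit target), so
   that block moves and string operations on it can use full-word accesses.  */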

/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}

/* Create an alias for a mangled name where we have changed the mangling (in
   GCC 8.1, we used U10__float128, and now we use u9__ieee128).  This is called
   via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME.  */

#if TARGET_ELF && RS6000_WEAK
static void
rs6000_globalize_decl_name (FILE * stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  targetm.asm_out.globalize_label (stream, name);

  if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
    {
      tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
      const char *old_name;

      ieee128_mangling_gcc_8_1 = true;
      lang_hooks.set_decl_assembler_name (decl);
      old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
      ieee128_mangling_gcc_8_1 = false;

      if (strcmp (name, old_name) != 0)
	{
	  fprintf (stream, "\t.weak %s\n", old_name);
	  fprintf (stream, "\t.set %s,%s\n", old_name, name);
	}
    }
}
#endif

/* On 64-bit Linux and Freebsd systems, possibly switch the long double library
   function names from <foo>l to <foo>f128 if the default long double type is
   IEEE 128-bit.  Typically, with the C and C++ languages, the standard math.h
   include file switches the names on systems that support long double as IEEE
   128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
   In the future, glibc will export names like __ieee128_sinf128 and we can
   switch to using those instead of using sinf128, which pollutes the user's
   namespace.

   This will switch the names for Fortran math functions as well (which doesn't
   use math.h).  However, Fortran needs other changes to the compiler and
   library before you can switch the real*16 type at compile time.

   We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name.  We
   only do this if the default is that long double is IBM extended double, and
   the user asked for IEEE 128-bit.  */

static tree
rs6000_mangle_decl_assembler_name (tree decl, tree id)
{
  if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
    {
      size_t len = IDENTIFIER_LENGTH (id);
      const char *name = IDENTIFIER_POINTER (id);

      if (name[len - 1] == 'l')
	{
	  bool uses_ieee128_p = false;
	  tree type = TREE_TYPE (decl);
	  machine_mode ret_mode = TYPE_MODE (type);

	  /* See if the function returns an IEEE 128-bit floating point type or
	     complex type.  */
	  if (ret_mode == TFmode || ret_mode == TCmode)
	    uses_ieee128_p = true;
	  else
	    {
	      function_args_iterator args_iter;
	      tree arg;

	      /* See if the function passes an IEEE 128-bit floating point type
		 or complex type.  */
	      FOREACH_FUNCTION_ARGS (type, arg, args_iter)
		{
		  machine_mode arg_mode = TYPE_MODE (arg);
		  if (arg_mode == TFmode || arg_mode == TCmode)
		    {
		      uses_ieee128_p = true;
		      break;
		    }
		}
	    }

	  /* If we passed or returned an IEEE 128-bit floating point type,
	     change the name.  */
	  if (uses_ieee128_p)
	    {
	      char *name2 = (char *) alloca (len + 4);
	      memcpy (name2, name, len - 1);
	      strcpy (name2 + len - 1, "f128");
	      id = get_identifier (name2);
	    }
	}
    }

  return id;
}
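
/* Example of the rename performed above: with IEEE 128-bit long double
   selected on a target whose default is IBM extended double, a call to
   __builtin_sinl ("sinl") is emitted as a call to "sinf128".  */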

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"