/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
23 #include "coretypes.h"
33 #include "stringpool.h"
40 #include "diagnostic-core.h"
41 #include "insn-attr.h"
44 #include "fold-const.h"
46 #include "stor-layout.h"
48 #include "print-tree.h"
54 #include "common/common-target.h"
55 #include "langhooks.h"
57 #include "sched-int.h"
59 #include "gimple-fold.h"
60 #include "gimple-iterator.h"
61 #include "gimple-ssa.h"
62 #include "gimple-walk.h"
65 #include "tm-constrs.h"
66 #include "tree-vectorizer.h"
67 #include "target-globals.h"
70 #include "tree-pass.h"
73 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
76 #include "gstab.h" /* for N_SLINE */
78 #include "case-cfn-macros.h"
80 #include "tree-ssa-propagate.h"
82 /* This file should be included last. */
83 #include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;
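
/* A single rs6000_stack_t describing the current function's frame layout is
   computed by rs6000_stack_info (declared below) and consumed by the
   prologue/epilogue code.  */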
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;
/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask
{
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
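
/* For example, -mrecip=rsqrtf enables only the SFmode and V4SFmode reciprocal
   square root estimates above, while -mrecip=all enables every estimate.  */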
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;		/* 0 = AT_HWCAP, 1 = AT_HWCAP2.  */
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
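
/* For example, user code can test these names at run time:
     if (__builtin_cpu_supports ("arch_2_07"))
       ... use POWER8 code path ...
   The builtin reads the HWCAP/HWCAP2 words the C library stores in the TCB,
   as described for the verification symbol below.  */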
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
static bool cpu_builtin_p;
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
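
/* With the enum ordering above, for example, IS_STD_REG_TYPE (ALTIVEC_REG_TYPE)
   and IS_FP_VECT_REG_TYPE (ALTIVEC_REG_TYPE) are both true, while
   IS_STD_REG_TYPE (SPR_REG_TYPE) is false.  */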
/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,			/* General purpose registers.  */
  RELOAD_REG_FPR,			/* Traditional floating point regs.  */
  RELOAD_REG_VMX,			/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,			/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;			/* Register class name.  */
  int reg;				/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
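
/* A mode/register-class pair is queried by testing these bits, e.g.
   (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_OFFSET) is nonzero
   when reg+offset addressing of that mode is valid in floating point
   registers; the helper functions below follow the same pattern.  */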
/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
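
/* rs6000_cost is pointed at one of the per-processor tables below, chosen
   from the -mcpu/-mtune setting when options are processed.  */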
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  0,			    /* SF->DF convert */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *,rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							    machine_mode, rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							    enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode, enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode, enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "xer",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "xer",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
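/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001, matching the
   VRSAVE layout described above.  */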
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1738 #undef TARGET_BINDS_LOCAL_P
1739 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1742 #undef TARGET_MS_BITFIELD_LAYOUT_P
1743 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1745 #undef TARGET_ASM_OUTPUT_MI_THUNK
1746 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1748 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1749 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1751 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1752 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1754 #undef TARGET_REGISTER_MOVE_COST
1755 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1756 #undef TARGET_MEMORY_MOVE_COST
1757 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1758 #undef TARGET_CANNOT_COPY_INSN_P
1759 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1760 #undef TARGET_RTX_COSTS
1761 #define TARGET_RTX_COSTS rs6000_rtx_costs
1762 #undef TARGET_ADDRESS_COST
1763 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1765 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1766 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1768 #undef TARGET_PROMOTE_FUNCTION_MODE
1769 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1771 #undef TARGET_RETURN_IN_MEMORY
1772 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1774 #undef TARGET_RETURN_IN_MSB
1775 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1777 #undef TARGET_SETUP_INCOMING_VARARGS
1778 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1780 /* Always strict argument naming on rs6000. */
1781 #undef TARGET_STRICT_ARGUMENT_NAMING
1782 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1783 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1784 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1785 #undef TARGET_SPLIT_COMPLEX_ARG
1786 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1787 #undef TARGET_MUST_PASS_IN_STACK
1788 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1789 #undef TARGET_PASS_BY_REFERENCE
1790 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1791 #undef TARGET_ARG_PARTIAL_BYTES
1792 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1793 #undef TARGET_FUNCTION_ARG_ADVANCE
1794 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1795 #undef TARGET_FUNCTION_ARG
1796 #define TARGET_FUNCTION_ARG rs6000_function_arg
1797 #undef TARGET_FUNCTION_ARG_BOUNDARY
1798 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1800 #undef TARGET_BUILD_BUILTIN_VA_LIST
1801 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1803 #undef TARGET_EXPAND_BUILTIN_VA_START
1804 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1806 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1807 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1809 #undef TARGET_EH_RETURN_FILTER_MODE
1810 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1812 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1813 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1815 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1816 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1818 #undef TARGET_FLOATN_MODE
1819 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1821 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1822 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1824 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1825 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1827 #undef TARGET_MD_ASM_ADJUST
1828 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1830 #undef TARGET_OPTION_OVERRIDE
1831 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1833 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1834 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1835 rs6000_builtin_vectorized_function
1837 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1838 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1839 rs6000_builtin_md_vectorized_function
1841 #undef TARGET_STACK_PROTECT_GUARD
1842 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1845 #undef TARGET_STACK_PROTECT_FAIL
1846 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1850 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1851 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
1861 #undef TARGET_MIN_ANCHOR_OFFSET
1862 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1863 #undef TARGET_MAX_ANCHOR_OFFSET
1864 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
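/* The minimum is written as -0x7fffffff - 1 rather than -0x80000000: on hosts
   where int is 32 bits the literal 0x80000000 is an unsigned constant, so
   negating it would not give the intended most-negative 32-bit value.  */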
1865 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1866 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1867 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1868 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1870 #undef TARGET_BUILTIN_RECIPROCAL
1871 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1873 #undef TARGET_SECONDARY_RELOAD
1874 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1876 #undef TARGET_LEGITIMATE_ADDRESS_P
1877 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1879 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1880 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1882 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1883 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1885 #undef TARGET_CAN_ELIMINATE
1886 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1888 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1889 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1891 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1892 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1894 #undef TARGET_TRAMPOLINE_INIT
1895 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1897 #undef TARGET_FUNCTION_VALUE
1898 #define TARGET_FUNCTION_VALUE rs6000_function_value
1900 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1901 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1903 #undef TARGET_OPTION_SAVE
1904 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1906 #undef TARGET_OPTION_RESTORE
1907 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1909 #undef TARGET_OPTION_PRINT
1910 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1912 #undef TARGET_CAN_INLINE_P
1913 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1915 #undef TARGET_SET_CURRENT_FUNCTION
1916 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1918 #undef TARGET_LEGITIMATE_CONSTANT_P
1919 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1921 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1922 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1924 #undef TARGET_CAN_USE_DOLOOP_P
1925 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1927 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1928 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1930 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1931 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1932 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1933 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1934 #undef TARGET_UNWIND_WORD_MODE
1935 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1937 #undef TARGET_OFFLOAD_OPTIONS
1938 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1940 #undef TARGET_C_MODE_FOR_SUFFIX
1941 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1943 #undef TARGET_INVALID_BINARY_OP
1944 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1946 #undef TARGET_OPTAB_SUPPORTED_P
1947 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1949 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1950 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1952 #undef TARGET_COMPARE_VERSION_PRIORITY
1953 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1955 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1956 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1957 rs6000_generate_version_dispatcher_body
1959 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1960 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1961 rs6000_get_function_versions_dispatcher
1963 #undef TARGET_OPTION_FUNCTION_VERSIONS
1964 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
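/* The #undef/#define pairs above override the default target hooks; the
   TARGET_INITIALIZER macro from target-def.h gathers the final values into
   the targetm structure that the rest of the compiler queries.  */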
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt
  const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};

/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
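/* For example, rs6000_cpu_name_lookup ("power8") returns the index of the
   "power8" row generated from rs6000-cpus.def, while an unrecognized string
   such as "power99" yields -1.  */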
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */
static int
rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* 128-bit floating point usually takes 2 registers, unless it is IEEE
     128-bit floating point that can go in vector registers, which has VSX
     memory addressing.  */
  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
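/* The division above rounds up.  For example, a 16-byte V4SImode value needs
   (16 + 4 - 1) / 4 = 4 GPRs in 32-bit mode, (16 + 8 - 1) / 8 = 2 GPRs in
   64-bit mode, and only (16 + 16 - 1) / 16 = 1 AltiVec register, since
   UNITS_PER_ALTIVEC_WORD is 16.  */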
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  if (COMPLEX_MODE_P (mode))
    mode = GET_MODE_INNER (mode);

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || FLOAT128_VECTOR_P (mode)
	  || reg_addr[mode].scalar_in_vmx_p
	  || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (FLOAT128_VECTOR_P (mode))
	return false;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	{
	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	    return 1;

	  if (TARGET_P8_VECTOR && (mode == SImode))
	    return 1;

	  if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
	    return 1;
	}

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
	    || mode == V1TImode);

  /* We cannot put non-VSX TImode or PTImode anywhere except the general
     registers, and the value must fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
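/* As an illustration of the last_regno checks above: in 64-bit mode a TImode
   value in GPRs needs two registers, so starting it in r31 gives
   last_regno == 32, which is the first FPR; INT_REGNO_P (last_regno) is then
   false and the combination is rejected.  */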
2128 /* Print interesting facts about registers. */
2130 rs6000_debug_reg_print (int first_regno
, int last_regno
, const char *reg_name
)
2134 for (r
= first_regno
; r
<= last_regno
; ++r
)
2136 const char *comma
= "";
2139 if (first_regno
== last_regno
)
2140 fprintf (stderr
, "%s:\t", reg_name
);
2142 fprintf (stderr
, "%s%d:\t", reg_name
, r
- first_regno
);
2145 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2146 if (rs6000_hard_regno_mode_ok_p
[m
][r
] && rs6000_hard_regno_nregs
[m
][r
])
2150 fprintf (stderr
, ",\n\t");
2155 if (rs6000_hard_regno_nregs
[m
][r
] > 1)
2156 len
+= fprintf (stderr
, "%s%s/%d", comma
, GET_MODE_NAME (m
),
2157 rs6000_hard_regno_nregs
[m
][r
]);
2159 len
+= fprintf (stderr
, "%s%s", comma
, GET_MODE_NAME (m
));
2164 if (call_used_regs
[r
])
2168 fprintf (stderr
, ",\n\t");
2173 len
+= fprintf (stderr
, "%s%s", comma
, "call-used");
2181 fprintf (stderr
, ",\n\t");
2186 len
+= fprintf (stderr
, "%s%s", comma
, "fixed");
2192 fprintf (stderr
, ",\n\t");
2196 len
+= fprintf (stderr
, "%sreg-class = %s", comma
,
2197 reg_class_names
[(int)rs6000_regno_regclass
[r
]]);
2202 fprintf (stderr
, ",\n\t");
2206 fprintf (stderr
, "%sregno = %d\n", comma
, r
);
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:	   ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:	   ret = "vsx";	      break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    case VECTOR_PAIRED:	   ret = "paired";    break;
    case VECTOR_OTHER:	   ret = "other";     break;
    default:		   ret = "unknown";   break;
    }

  return ret;
}
/* Inner function printing just the address mask for a particular reload
   register class.  */
DEBUG_FUNCTION char *
rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
{
2237 if ((mask
& RELOAD_REG_VALID
) != 0)
2239 else if (keep_spaces
)
2242 if ((mask
& RELOAD_REG_MULTIPLE
) != 0)
2244 else if (keep_spaces
)
2247 if ((mask
& RELOAD_REG_INDEXED
) != 0)
2249 else if (keep_spaces
)
2252 if ((mask
& RELOAD_REG_QUAD_OFFSET
) != 0)
2254 else if ((mask
& RELOAD_REG_OFFSET
) != 0)
2256 else if (keep_spaces
)
2259 if ((mask
& RELOAD_REG_PRE_INCDEC
) != 0)
2261 else if (keep_spaces
)
2264 if ((mask
& RELOAD_REG_PRE_MODIFY
) != 0)
2266 else if (keep_spaces
)
2269 if ((mask
& RELOAD_REG_AND_M16
) != 0)
2271 else if (keep_spaces
)
/* Print the address masks in a human readable fashion.  */
DEBUG_FUNCTION void
rs6000_debug_print_mode (ssize_t m)
{
2287 fprintf (stderr
, "Mode: %-5s", GET_MODE_NAME (m
));
2288 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2289 fprintf (stderr
, " %s: %s", reload_reg_map
[rc
].name
,
2290 rs6000_debug_addr_mask (reg_addr
[m
].addr_mask
[rc
], true));
2292 if ((reg_addr
[m
].reload_store
!= CODE_FOR_nothing
)
2293 || (reg_addr
[m
].reload_load
!= CODE_FOR_nothing
))
2294 fprintf (stderr
, " Reload=%c%c",
2295 (reg_addr
[m
].reload_store
!= CODE_FOR_nothing
) ? 's' : '*',
2296 (reg_addr
[m
].reload_load
!= CODE_FOR_nothing
) ? 'l' : '*');
2298 spaces
+= sizeof (" Reload=sl") - 1;
2300 if (reg_addr
[m
].scalar_in_vmx_p
)
2302 fprintf (stderr
, "%*s Upper=y", spaces
, "");
2306 spaces
+= sizeof (" Upper=y") - 1;
2308 fuse_extra_p
= ((reg_addr
[m
].fusion_gpr_ld
!= CODE_FOR_nothing
)
2309 || reg_addr
[m
].fused_toc
);
2312 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2314 if (rc
!= RELOAD_REG_ANY
)
2316 if (reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
2317 || reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
2318 || reg_addr
[m
].fusion_addi_st
[rc
] != CODE_FOR_nothing
2319 || reg_addr
[m
].fusion_addis_ld
[rc
] != CODE_FOR_nothing
2320 || reg_addr
[m
].fusion_addis_st
[rc
] != CODE_FOR_nothing
)
2322 fuse_extra_p
= true;
2331 fprintf (stderr
, "%*s Fuse:", spaces
, "");
2334 for (rc
= 0; rc
< N_RELOAD_REG
; rc
++)
2336 if (rc
!= RELOAD_REG_ANY
)
2340 if (reg_addr
[m
].fusion_addis_ld
[rc
] != CODE_FOR_nothing
)
2342 else if (reg_addr
[m
].fusion_addi_ld
[rc
] != CODE_FOR_nothing
)
2347 if (reg_addr
[m
].fusion_addis_st
[rc
] != CODE_FOR_nothing
)
2349 else if (reg_addr
[m
].fusion_addi_st
[rc
] != CODE_FOR_nothing
)
2354 if (load
== '-' && store
== '-')
2358 fprintf (stderr
, "%*s%c=%c%c", (spaces
+ 1), "",
2359 reload_reg_map
[rc
].name
[0], load
, store
);
2365 if (reg_addr
[m
].fusion_gpr_ld
!= CODE_FOR_nothing
)
2367 fprintf (stderr
, "%*sP8gpr", (spaces
+ 1), "");
2371 spaces
+= sizeof (" P8gpr") - 1;
2373 if (reg_addr
[m
].fused_toc
)
2375 fprintf (stderr
, "%*sToc", (spaces
+ 1), "");
2379 spaces
+= sizeof (" Toc") - 1;
2382 spaces
+= sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2384 if (rs6000_vector_unit
[m
] != VECTOR_NONE
2385 || rs6000_vector_mem
[m
] != VECTOR_NONE
)
2387 fprintf (stderr
, "%*s vector: arith=%-10s mem=%s",
2389 rs6000_debug_vector_unit (rs6000_vector_unit
[m
]),
2390 rs6000_debug_vector_unit (rs6000_vector_mem
[m
]));
2393 fputs ("\n", stderr
);
2396 #define DEBUG_FMT_ID "%-32s= "
2397 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2398 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2399 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
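/* For example, fprintf (stderr, DEBUG_FMT_S, "cmodel", "medium") prints the
   name left-justified in a 32-column field followed by "= medium", which keeps
   the -mdebug=reg output aligned.  */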
2401 /* Print various interesting information with -mdebug=reg. */
2403 rs6000_debug_reg_global (void)
2405 static const char *const tf
[2] = { "false", "true" };
2406 const char *nl
= (const char *)0;
2409 char costly_num
[20];
2411 char flags_buffer
[40];
2412 const char *costly_str
;
2413 const char *nop_str
;
2414 const char *trace_str
;
2415 const char *abi_str
;
2416 const char *cmodel_str
;
2417 struct cl_target_option cl_opts
;
2419 /* Modes we want tieable information on. */
2420 static const machine_mode print_tieable_modes
[] = {
2456 /* Virtual regs we are interested in. */
2457 const static struct {
2458 int regno
; /* register number. */
2459 const char *name
; /* register name. */
2460 } virtual_regs
[] = {
2461 { STACK_POINTER_REGNUM
, "stack pointer:" },
2462 { TOC_REGNUM
, "toc: " },
2463 { STATIC_CHAIN_REGNUM
, "static chain: " },
2464 { RS6000_PIC_OFFSET_TABLE_REGNUM
, "pic offset: " },
2465 { HARD_FRAME_POINTER_REGNUM
, "hard frame: " },
2466 { ARG_POINTER_REGNUM
, "arg pointer: " },
2467 { FRAME_POINTER_REGNUM
, "frame pointer:" },
2468 { FIRST_PSEUDO_REGISTER
, "first pseudo: " },
2469 { FIRST_VIRTUAL_REGISTER
, "first virtual:" },
2470 { VIRTUAL_INCOMING_ARGS_REGNUM
, "incoming_args:" },
2471 { VIRTUAL_STACK_VARS_REGNUM
, "stack_vars: " },
2472 { VIRTUAL_STACK_DYNAMIC_REGNUM
, "stack_dynamic:" },
2473 { VIRTUAL_OUTGOING_ARGS_REGNUM
, "outgoing_args:" },
2474 { VIRTUAL_CFA_REGNUM
, "cfa (frame): " },
2475 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM
, "stack boundry:" },
2476 { LAST_VIRTUAL_REGISTER
, "last virtual: " },
2479 fputs ("\nHard register information:\n", stderr
);
2480 rs6000_debug_reg_print (FIRST_GPR_REGNO
, LAST_GPR_REGNO
, "gr");
2481 rs6000_debug_reg_print (FIRST_FPR_REGNO
, LAST_FPR_REGNO
, "fp");
2482 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO
,
2485 rs6000_debug_reg_print (LR_REGNO
, LR_REGNO
, "lr");
2486 rs6000_debug_reg_print (CTR_REGNO
, CTR_REGNO
, "ctr");
2487 rs6000_debug_reg_print (CR0_REGNO
, CR7_REGNO
, "cr");
2488 rs6000_debug_reg_print (CA_REGNO
, CA_REGNO
, "ca");
2489 rs6000_debug_reg_print (VRSAVE_REGNO
, VRSAVE_REGNO
, "vrsave");
2490 rs6000_debug_reg_print (VSCR_REGNO
, VSCR_REGNO
, "vscr");
2492 fputs ("\nVirtual/stack/frame registers:\n", stderr
);
2493 for (v
= 0; v
< ARRAY_SIZE (virtual_regs
); v
++)
2494 fprintf (stderr
, "%s regno = %3d\n", virtual_regs
[v
].name
, virtual_regs
[v
].regno
);
2498 "d reg_class = %s\n"
2499 "f reg_class = %s\n"
2500 "v reg_class = %s\n"
2501 "wa reg_class = %s\n"
2502 "wb reg_class = %s\n"
2503 "wd reg_class = %s\n"
2504 "we reg_class = %s\n"
2505 "wf reg_class = %s\n"
2506 "wg reg_class = %s\n"
2507 "wh reg_class = %s\n"
2508 "wi reg_class = %s\n"
2509 "wj reg_class = %s\n"
2510 "wk reg_class = %s\n"
2511 "wl reg_class = %s\n"
2512 "wm reg_class = %s\n"
2513 "wo reg_class = %s\n"
2514 "wp reg_class = %s\n"
2515 "wq reg_class = %s\n"
2516 "wr reg_class = %s\n"
2517 "ws reg_class = %s\n"
2518 "wt reg_class = %s\n"
2519 "wu reg_class = %s\n"
2520 "wv reg_class = %s\n"
2521 "ww reg_class = %s\n"
2522 "wx reg_class = %s\n"
2523 "wy reg_class = %s\n"
2524 "wz reg_class = %s\n"
2525 "wA reg_class = %s\n"
2526 "wH reg_class = %s\n"
2527 "wI reg_class = %s\n"
2528 "wJ reg_class = %s\n"
2529 "wK reg_class = %s\n"
2531 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_d
]],
2532 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_f
]],
2533 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_v
]],
2534 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wa
]],
2535 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wb
]],
2536 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wd
]],
2537 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_we
]],
2538 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wf
]],
2539 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wg
]],
2540 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wh
]],
2541 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wi
]],
2542 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wj
]],
2543 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wk
]],
2544 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wl
]],
2545 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wm
]],
2546 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wo
]],
2547 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wp
]],
2548 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wq
]],
2549 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wr
]],
2550 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ws
]],
2551 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wt
]],
2552 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wu
]],
2553 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wv
]],
2554 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_ww
]],
2555 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wx
]],
2556 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wy
]],
2557 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wz
]],
2558 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wA
]],
2559 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wH
]],
2560 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wI
]],
2561 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wJ
]],
2562 reg_class_names
[rs6000_constraints
[RS6000_CONSTRAINT_wK
]]);
2565 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2566 rs6000_debug_print_mode (m
);
2568 fputs ("\n", stderr
);
2570 for (m1
= 0; m1
< ARRAY_SIZE (print_tieable_modes
); m1
++)
2572 machine_mode mode1
= print_tieable_modes
[m1
];
2573 bool first_time
= true;
2575 nl
= (const char *)0;
2576 for (m2
= 0; m2
< ARRAY_SIZE (print_tieable_modes
); m2
++)
2578 machine_mode mode2
= print_tieable_modes
[m2
];
2579 if (mode1
!= mode2
&& MODES_TIEABLE_P (mode1
, mode2
))
2583 fprintf (stderr
, "Tieable modes %s:", GET_MODE_NAME (mode1
));
2588 fprintf (stderr
, " %s", GET_MODE_NAME (mode2
));
2593 fputs ("\n", stderr
);
2599 if (rs6000_recip_control
)
2601 fprintf (stderr
, "\nReciprocal mask = 0x%x\n", rs6000_recip_control
);
2603 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2604 if (rs6000_recip_bits
[m
])
2607 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2609 (RS6000_RECIP_AUTO_RE_P (m
)
2611 : (RS6000_RECIP_HAVE_RE_P (m
) ? "have" : "none")),
2612 (RS6000_RECIP_AUTO_RSQRTE_P (m
)
2614 : (RS6000_RECIP_HAVE_RSQRTE_P (m
) ? "have" : "none")));
2617 fputs ("\n", stderr
);
2620 if (rs6000_cpu_index
>= 0)
2622 const char *name
= processor_target_table
[rs6000_cpu_index
].name
;
2624 = processor_target_table
[rs6000_cpu_index
].target_enable
;
2626 sprintf (flags_buffer
, "-mcpu=%s flags", name
);
2627 rs6000_print_isa_options (stderr
, 0, flags_buffer
, flags
);
2630 fprintf (stderr
, DEBUG_FMT_S
, "cpu", "<none>");
2632 if (rs6000_tune_index
>= 0)
2634 const char *name
= processor_target_table
[rs6000_tune_index
].name
;
2636 = processor_target_table
[rs6000_tune_index
].target_enable
;
2638 sprintf (flags_buffer
, "-mtune=%s flags", name
);
2639 rs6000_print_isa_options (stderr
, 0, flags_buffer
, flags
);
2642 fprintf (stderr
, DEBUG_FMT_S
, "tune", "<none>");
2644 cl_target_option_save (&cl_opts
, &global_options
);
2645 rs6000_print_isa_options (stderr
, 0, "rs6000_isa_flags",
2648 rs6000_print_isa_options (stderr
, 0, "rs6000_isa_flags_explicit",
2649 rs6000_isa_flags_explicit
);
2651 rs6000_print_builtin_options (stderr
, 0, "rs6000_builtin_mask",
2652 rs6000_builtin_mask
);
2654 rs6000_print_isa_options (stderr
, 0, "TARGET_DEFAULT", TARGET_DEFAULT
);
2656 fprintf (stderr
, DEBUG_FMT_S
, "--with-cpu default",
2657 OPTION_TARGET_CPU_DEFAULT
? OPTION_TARGET_CPU_DEFAULT
: "<none>");
2659 switch (rs6000_sched_costly_dep
)
2661 case max_dep_latency
:
2662 costly_str
= "max_dep_latency";
2666 costly_str
= "no_dep_costly";
2669 case all_deps_costly
:
2670 costly_str
= "all_deps_costly";
2673 case true_store_to_load_dep_costly
:
2674 costly_str
= "true_store_to_load_dep_costly";
2677 case store_to_load_dep_costly
:
2678 costly_str
= "store_to_load_dep_costly";
2682 costly_str
= costly_num
;
2683 sprintf (costly_num
, "%d", (int)rs6000_sched_costly_dep
);
2687 fprintf (stderr
, DEBUG_FMT_S
, "sched_costly_dep", costly_str
);
2689 switch (rs6000_sched_insert_nops
)
2691 case sched_finish_regroup_exact
:
2692 nop_str
= "sched_finish_regroup_exact";
2695 case sched_finish_pad_groups
:
2696 nop_str
= "sched_finish_pad_groups";
2699 case sched_finish_none
:
2700 nop_str
= "sched_finish_none";
2705 sprintf (nop_num
, "%d", (int)rs6000_sched_insert_nops
);
2709 fprintf (stderr
, DEBUG_FMT_S
, "sched_insert_nops", nop_str
);
2711 switch (rs6000_sdata
)
2718 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "data");
2722 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "sysv");
2726 fprintf (stderr
, DEBUG_FMT_S
, "sdata", "eabi");
2731 switch (rs6000_traceback
)
2733 case traceback_default
: trace_str
= "default"; break;
2734 case traceback_none
: trace_str
= "none"; break;
2735 case traceback_part
: trace_str
= "part"; break;
2736 case traceback_full
: trace_str
= "full"; break;
2737 default: trace_str
= "unknown"; break;
2740 fprintf (stderr
, DEBUG_FMT_S
, "traceback", trace_str
);
2742 switch (rs6000_current_cmodel
)
2744 case CMODEL_SMALL
: cmodel_str
= "small"; break;
2745 case CMODEL_MEDIUM
: cmodel_str
= "medium"; break;
2746 case CMODEL_LARGE
: cmodel_str
= "large"; break;
2747 default: cmodel_str
= "unknown"; break;
2750 fprintf (stderr
, DEBUG_FMT_S
, "cmodel", cmodel_str
);
2752 switch (rs6000_current_abi
)
2754 case ABI_NONE
: abi_str
= "none"; break;
2755 case ABI_AIX
: abi_str
= "aix"; break;
2756 case ABI_ELFv2
: abi_str
= "ELFv2"; break;
2757 case ABI_V4
: abi_str
= "V4"; break;
2758 case ABI_DARWIN
: abi_str
= "darwin"; break;
2759 default: abi_str
= "unknown"; break;
2762 fprintf (stderr
, DEBUG_FMT_S
, "abi", abi_str
);
2764 if (rs6000_altivec_abi
)
2765 fprintf (stderr
, DEBUG_FMT_S
, "altivec_abi", "true");
2767 if (rs6000_darwin64_abi
)
2768 fprintf (stderr
, DEBUG_FMT_S
, "darwin64_abi", "true");
2770 fprintf (stderr
, DEBUG_FMT_S
, "single_float",
2771 (TARGET_SINGLE_FLOAT
? "true" : "false"));
2773 fprintf (stderr
, DEBUG_FMT_S
, "double_float",
2774 (TARGET_DOUBLE_FLOAT
? "true" : "false"));
2776 fprintf (stderr
, DEBUG_FMT_S
, "soft_float",
2777 (TARGET_SOFT_FLOAT
? "true" : "false"));
2779 if (TARGET_LINK_STACK
)
2780 fprintf (stderr
, DEBUG_FMT_S
, "link_stack", "true");
2782 if (TARGET_P8_FUSION
)
2786 strcpy (options
, (TARGET_P9_FUSION
) ? "power9" : "power8");
2787 if (TARGET_TOC_FUSION
)
2788 strcat (options
, ", toc");
2790 if (TARGET_P8_FUSION_SIGN
)
2791 strcat (options
, ", sign");
2793 fprintf (stderr
, DEBUG_FMT_S
, "fusion", options
);
2796 fprintf (stderr
, DEBUG_FMT_S
, "plt-format",
2797 TARGET_SECURE_PLT
? "secure" : "bss");
2798 fprintf (stderr
, DEBUG_FMT_S
, "struct-return",
2799 aix_struct_return
? "aix" : "sysv");
2800 fprintf (stderr
, DEBUG_FMT_S
, "always_hint", tf
[!!rs6000_always_hint
]);
2801 fprintf (stderr
, DEBUG_FMT_S
, "sched_groups", tf
[!!rs6000_sched_groups
]);
2802 fprintf (stderr
, DEBUG_FMT_S
, "align_branch",
2803 tf
[!!rs6000_align_branch_targets
]);
2804 fprintf (stderr
, DEBUG_FMT_D
, "tls_size", rs6000_tls_size
);
2805 fprintf (stderr
, DEBUG_FMT_D
, "long_double_size",
2806 rs6000_long_double_type_size
);
2807 fprintf (stderr
, DEBUG_FMT_D
, "sched_restricted_insns_priority",
2808 (int)rs6000_sched_restricted_insns_priority
);
2809 fprintf (stderr
, DEBUG_FMT_D
, "Number of standard builtins",
2811 fprintf (stderr
, DEBUG_FMT_D
, "Number of rs6000 builtins",
2812 (int)RS6000_BUILTIN_COUNT
);
2814 fprintf (stderr
, DEBUG_FMT_D
, "Enable float128 on VSX",
2815 (int)TARGET_FLOAT128_ENABLE_TYPE
);
2818 fprintf (stderr
, DEBUG_FMT_D
, "VSX easy 64-bit scalar element",
2819 (int)VECTOR_ELEMENT_SCALAR_64BIT
);
2821 if (TARGET_DIRECT_MOVE_128
)
2822 fprintf (stderr
, DEBUG_FMT_D
, "VSX easy 64-bit mfvsrld element",
2823 (int)VECTOR_ELEMENT_MFVSRLD_64BIT
);
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate-address support figure out the appropriate addressing to
   use.  */

static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;
2837 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
2839 machine_mode m2
= (machine_mode
) m
;
2840 bool complex_p
= false;
2841 bool small_int_p
= (m2
== QImode
|| m2
== HImode
|| m2
== SImode
);
2844 if (COMPLEX_MODE_P (m2
))
2847 m2
= GET_MODE_INNER (m2
);
2850 msize
= GET_MODE_SIZE (m2
);
2852 /* SDmode is special in that we want to access it only via REG+REG
2853 addressing on power7 and above, since we want to use the LFIWZX and
2854 STFIWZX instructions to load it. */
2855 bool indexed_only_p
= (m
== SDmode
&& TARGET_NO_SDMODE_STACK
);
2858 for (rc
= FIRST_RELOAD_REG_CLASS
; rc
<= LAST_RELOAD_REG_CLASS
; rc
++)
2861 reg
= reload_reg_map
[rc
].reg
;
2863 /* Can mode values go in the GPR/FPR/Altivec registers? */
2864 if (reg
>= 0 && rs6000_hard_regno_mode_ok_p
[m
][reg
])
2866 bool small_int_vsx_p
= (small_int_p
2867 && (rc
== RELOAD_REG_FPR
2868 || rc
== RELOAD_REG_VMX
));
2870 nregs
= rs6000_hard_regno_nregs
[m
][reg
];
2871 addr_mask
|= RELOAD_REG_VALID
;
2873 /* Indicate if the mode takes more than 1 physical register. If
2874 it takes a single register, indicate it can do REG+REG
2875 addressing. Small integers in VSX registers can only do
2876 REG+REG addressing. */
2877 if (small_int_vsx_p
)
2878 addr_mask
|= RELOAD_REG_INDEXED
;
2879 else if (nregs
> 1 || m
== BLKmode
|| complex_p
)
2880 addr_mask
|= RELOAD_REG_MULTIPLE
;
2882 addr_mask
|= RELOAD_REG_INDEXED
;
2884 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2885 addressing. If we allow scalars into Altivec registers,
2886 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2889 && (rc
== RELOAD_REG_GPR
|| rc
== RELOAD_REG_FPR
)
2891 && !VECTOR_MODE_P (m2
)
2892 && !FLOAT128_VECTOR_P (m2
)
2894 && !small_int_vsx_p
)
2896 addr_mask
|= RELOAD_REG_PRE_INCDEC
;
2898 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2899 we don't allow PRE_MODIFY for some multi-register
2904 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2908 if (TARGET_POWERPC64
)
2909 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2915 addr_mask
|= RELOAD_REG_PRE_MODIFY
;
2921 /* GPR and FPR registers can do REG+OFFSET addressing, except
2922 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2923 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2924 if ((addr_mask
!= 0) && !indexed_only_p
2926 && (rc
== RELOAD_REG_GPR
2927 || ((msize
== 8 || m2
== SFmode
)
2928 && (rc
== RELOAD_REG_FPR
2929 || (rc
== RELOAD_REG_VMX
&& TARGET_P9_VECTOR
)))))
2930 addr_mask
|= RELOAD_REG_OFFSET
;
	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
	     instructions are enabled.  The offset for 128-bit VSX registers is
	     only 12 bits.  While GPRs can handle the full offset range, VSX
	     registers can only handle the restricted range.  */
2936 else if ((addr_mask
!= 0) && !indexed_only_p
2937 && msize
== 16 && TARGET_P9_VECTOR
2938 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2
)
2939 || (m2
== TImode
&& TARGET_VSX
)))
2941 addr_mask
|= RELOAD_REG_OFFSET
;
2942 if (rc
== RELOAD_REG_FPR
|| rc
== RELOAD_REG_VMX
)
2943 addr_mask
|= RELOAD_REG_QUAD_OFFSET
;
2946 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2947 addressing on 128-bit types. */
2948 if (rc
== RELOAD_REG_VMX
&& msize
== 16
2949 && (addr_mask
& RELOAD_REG_VALID
) != 0)
2950 addr_mask
|= RELOAD_REG_AND_M16
;
2952 reg_addr
[m
].addr_mask
[rc
] = addr_mask
;
2953 any_addr_mask
|= addr_mask
;
2956 reg_addr
[m
].addr_mask
[RELOAD_REG_ANY
] = any_addr_mask
;
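      /* The RELOAD_REG_ANY slot holds the union of the per-class masks, so
	 later code can test whether any register class supports a given
	 addressing form for this mode without looping over the individual
	 GPR/FPR/VMX entries.  */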
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = NO_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;

  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }
3026 /* Precalculate the valid memory formats as well as the vector information,
3027 this must be set up before the rs6000_hard_regno_nregs_internal calls
3029 gcc_assert ((int)VECTOR_NONE
== 0);
3030 memset ((void *) &rs6000_vector_unit
[0], '\0', sizeof (rs6000_vector_unit
));
3031 memset ((void *) &rs6000_vector_mem
[0], '\0', sizeof (rs6000_vector_unit
));
3033 gcc_assert ((int)CODE_FOR_nothing
== 0);
3034 memset ((void *) ®_addr
[0], '\0', sizeof (reg_addr
));
3036 gcc_assert ((int)NO_REGS
== 0);
3037 memset ((void *) &rs6000_constraints
[0], '\0', sizeof (rs6000_constraints
));
  /* The VSX hardware allows native alignment for vectors, but we control
     whether the compiler believes it can use native alignment or must still
     use 128-bit alignment.  */
3041 if (TARGET_VSX
&& !TARGET_VSX_ALIGN_128
)
3052 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3053 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3054 if (TARGET_FLOAT128_TYPE
)
3056 rs6000_vector_mem
[KFmode
] = VECTOR_VSX
;
3057 rs6000_vector_align
[KFmode
] = 128;
3059 if (FLOAT128_IEEE_P (TFmode
))
3061 rs6000_vector_mem
[TFmode
] = VECTOR_VSX
;
3062 rs6000_vector_align
[TFmode
] = 128;
3066 /* V2DF mode, VSX only. */
3069 rs6000_vector_unit
[V2DFmode
] = VECTOR_VSX
;
3070 rs6000_vector_mem
[V2DFmode
] = VECTOR_VSX
;
3071 rs6000_vector_align
[V2DFmode
] = align64
;
3074 /* V4SF mode, either VSX or Altivec. */
3077 rs6000_vector_unit
[V4SFmode
] = VECTOR_VSX
;
3078 rs6000_vector_mem
[V4SFmode
] = VECTOR_VSX
;
3079 rs6000_vector_align
[V4SFmode
] = align32
;
3081 else if (TARGET_ALTIVEC
)
3083 rs6000_vector_unit
[V4SFmode
] = VECTOR_ALTIVEC
;
3084 rs6000_vector_mem
[V4SFmode
] = VECTOR_ALTIVEC
;
3085 rs6000_vector_align
[V4SFmode
] = align32
;
3088 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3092 rs6000_vector_unit
[V4SImode
] = VECTOR_ALTIVEC
;
3093 rs6000_vector_unit
[V8HImode
] = VECTOR_ALTIVEC
;
3094 rs6000_vector_unit
[V16QImode
] = VECTOR_ALTIVEC
;
3095 rs6000_vector_align
[V4SImode
] = align32
;
3096 rs6000_vector_align
[V8HImode
] = align32
;
3097 rs6000_vector_align
[V16QImode
] = align32
;
3101 rs6000_vector_mem
[V4SImode
] = VECTOR_VSX
;
3102 rs6000_vector_mem
[V8HImode
] = VECTOR_VSX
;
3103 rs6000_vector_mem
[V16QImode
] = VECTOR_VSX
;
3107 rs6000_vector_mem
[V4SImode
] = VECTOR_ALTIVEC
;
3108 rs6000_vector_mem
[V8HImode
] = VECTOR_ALTIVEC
;
3109 rs6000_vector_mem
[V16QImode
] = VECTOR_ALTIVEC
;
3113 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3114 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3117 rs6000_vector_mem
[V2DImode
] = VECTOR_VSX
;
3118 rs6000_vector_unit
[V2DImode
]
3119 = (TARGET_P8_VECTOR
) ? VECTOR_P8_VECTOR
: VECTOR_NONE
;
3120 rs6000_vector_align
[V2DImode
] = align64
;
3122 rs6000_vector_mem
[V1TImode
] = VECTOR_VSX
;
3123 rs6000_vector_unit
[V1TImode
]
3124 = (TARGET_P8_VECTOR
) ? VECTOR_P8_VECTOR
: VECTOR_NONE
;
3125 rs6000_vector_align
[V1TImode
] = 128;
3128 /* DFmode, see if we want to use the VSX unit. Memory is handled
3129 differently, so don't set rs6000_vector_mem. */
3132 rs6000_vector_unit
[DFmode
] = VECTOR_VSX
;
3133 rs6000_vector_align
[DFmode
] = 64;
3136 /* SFmode, see if we want to use the VSX unit. */
3137 if (TARGET_P8_VECTOR
)
3139 rs6000_vector_unit
[SFmode
] = VECTOR_VSX
;
3140 rs6000_vector_align
[SFmode
] = 32;
3143 /* Allow TImode in VSX register and set the VSX memory macros. */
3146 rs6000_vector_mem
[TImode
] = VECTOR_VSX
;
3147 rs6000_vector_align
[TImode
] = align64
;
3150 /* TODO add paired floating point vector support. */
3152 /* Register class constraints for the constraints that depend on compile
3153 switches. When the VSX code was added, different constraints were added
3154 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3155 of the VSX registers are used. The register classes for scalar floating
3156 point types is set, based on whether we allow that type into the upper
3157 (Altivec) registers. GCC has register classes to target the Altivec
3158 registers for load/store operations, to select using a VSX memory
3159 operation instead of the traditional floating point operation. The
3162 d - Register class to use with traditional DFmode instructions.
3163 f - Register class to use with traditional SFmode instructions.
3164 v - Altivec register.
3165 wa - Any VSX register.
3166 wc - Reserved to represent individual CR bits (used in LLVM).
3167 wd - Preferred register class for V2DFmode.
3168 wf - Preferred register class for V4SFmode.
3169 wg - Float register for power6x move insns.
3170 wh - FP register for direct move instructions.
3171 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3172 wj - FP or VSX register to hold 64-bit integers for direct moves.
3173 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3174 wl - Float register if we can do 32-bit signed int loads.
3175 wm - VSX register for ISA 2.07 direct move operations.
3176 wn - always NO_REGS.
3177 wr - GPR if 64-bit mode is permitted.
3178 ws - Register class to do ISA 2.06 DF operations.
3179 wt - VSX register for TImode in VSX registers.
3180 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3181 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3182 ww - Register class to do SF conversions in with VSX operations.
3183 wx - Float register if we can do 32-bit int stores.
3184 wy - Register class to do ISA 2.07 SF operations.
3185 wz - Float register if we can do 32-bit unsigned int loads.
3186 wH - Altivec register if SImode is allowed in VSX registers.
3187 wI - VSX register if SImode is allowed in VSX registers.
3188 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3189 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
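  /* As an example of how these classes are consumed: an insn alternative
     written with the "ww" constraint (SF conversions with VSX operations)
     matches the class set below, VSX_REGS or FLOAT_REGS; if neither option is
     enabled the entry stays NO_REGS and the alternative matches no register
     at all.  */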
3191 if (TARGET_HARD_FLOAT
)
3192 rs6000_constraints
[RS6000_CONSTRAINT_f
] = FLOAT_REGS
; /* SFmode */
3194 if (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
3195 rs6000_constraints
[RS6000_CONSTRAINT_d
] = FLOAT_REGS
; /* DFmode */
3199 rs6000_constraints
[RS6000_CONSTRAINT_wa
] = VSX_REGS
;
3200 rs6000_constraints
[RS6000_CONSTRAINT_wd
] = VSX_REGS
; /* V2DFmode */
3201 rs6000_constraints
[RS6000_CONSTRAINT_wf
] = VSX_REGS
; /* V4SFmode */
3202 rs6000_constraints
[RS6000_CONSTRAINT_ws
] = VSX_REGS
; /* DFmode */
3203 rs6000_constraints
[RS6000_CONSTRAINT_wv
] = ALTIVEC_REGS
; /* DFmode */
3204 rs6000_constraints
[RS6000_CONSTRAINT_wi
] = VSX_REGS
; /* DImode */
3205 rs6000_constraints
[RS6000_CONSTRAINT_wt
] = VSX_REGS
; /* TImode */
3208 /* Add conditional constraints based on various options, to allow us to
3209 collapse multiple insn patterns. */
3211 rs6000_constraints
[RS6000_CONSTRAINT_v
] = ALTIVEC_REGS
;
3213 if (TARGET_MFPGPR
) /* DFmode */
3214 rs6000_constraints
[RS6000_CONSTRAINT_wg
] = FLOAT_REGS
;
3217 rs6000_constraints
[RS6000_CONSTRAINT_wl
] = FLOAT_REGS
; /* DImode */
3219 if (TARGET_DIRECT_MOVE
)
3221 rs6000_constraints
[RS6000_CONSTRAINT_wh
] = FLOAT_REGS
;
3222 rs6000_constraints
[RS6000_CONSTRAINT_wj
] /* DImode */
3223 = rs6000_constraints
[RS6000_CONSTRAINT_wi
];
3224 rs6000_constraints
[RS6000_CONSTRAINT_wk
] /* DFmode */
3225 = rs6000_constraints
[RS6000_CONSTRAINT_ws
];
3226 rs6000_constraints
[RS6000_CONSTRAINT_wm
] = VSX_REGS
;
3229 if (TARGET_POWERPC64
)
3231 rs6000_constraints
[RS6000_CONSTRAINT_wr
] = GENERAL_REGS
;
3232 rs6000_constraints
[RS6000_CONSTRAINT_wA
] = BASE_REGS
;
3235 if (TARGET_P8_VECTOR
) /* SFmode */
3237 rs6000_constraints
[RS6000_CONSTRAINT_wu
] = ALTIVEC_REGS
;
3238 rs6000_constraints
[RS6000_CONSTRAINT_wy
] = VSX_REGS
;
3239 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = VSX_REGS
;
3241 else if (TARGET_VSX
)
3242 rs6000_constraints
[RS6000_CONSTRAINT_ww
] = FLOAT_REGS
;
3245 rs6000_constraints
[RS6000_CONSTRAINT_wx
] = FLOAT_REGS
; /* DImode */
3248 rs6000_constraints
[RS6000_CONSTRAINT_wz
] = FLOAT_REGS
; /* DImode */
3250 if (TARGET_FLOAT128_TYPE
)
3252 rs6000_constraints
[RS6000_CONSTRAINT_wq
] = VSX_REGS
; /* KFmode */
3253 if (FLOAT128_IEEE_P (TFmode
))
3254 rs6000_constraints
[RS6000_CONSTRAINT_wp
] = VSX_REGS
; /* TFmode */
3257 if (TARGET_P9_VECTOR
)
3259 /* Support for new D-form instructions. */
3260 rs6000_constraints
[RS6000_CONSTRAINT_wb
] = ALTIVEC_REGS
;
3262 /* Support for ISA 3.0 (power9) vectors. */
3263 rs6000_constraints
[RS6000_CONSTRAINT_wo
] = VSX_REGS
;
3266 /* Support for new direct moves (ISA 3.0 + 64bit). */
3267 if (TARGET_DIRECT_MOVE_128
)
3268 rs6000_constraints
[RS6000_CONSTRAINT_we
] = VSX_REGS
;
3270 /* Support small integers in VSX registers. */
3271 if (TARGET_P8_VECTOR
)
3273 rs6000_constraints
[RS6000_CONSTRAINT_wH
] = ALTIVEC_REGS
;
3274 rs6000_constraints
[RS6000_CONSTRAINT_wI
] = FLOAT_REGS
;
3275 if (TARGET_P9_VECTOR
)
3277 rs6000_constraints
[RS6000_CONSTRAINT_wJ
] = FLOAT_REGS
;
3278 rs6000_constraints
[RS6000_CONSTRAINT_wK
] = ALTIVEC_REGS
;
3282 /* Set up the reload helper and direct move functions. */
3283 if (TARGET_VSX
|| TARGET_ALTIVEC
)
3287 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_di_store
;
3288 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_di_load
;
3289 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_di_store
;
3290 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_di_load
;
3291 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_di_store
;
3292 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_di_load
;
3293 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_di_store
;
3294 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_di_load
;
3295 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_di_store
;
3296 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_di_load
;
3297 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_di_store
;
3298 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_di_load
;
3299 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_di_store
;
3300 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_di_load
;
3301 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_di_store
;
3302 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_di_load
;
3303 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_di_store
;
3304 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_di_load
;
3305 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_di_store
;
3306 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_di_load
;
3308 if (FLOAT128_VECTOR_P (KFmode
))
3310 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_di_store
;
3311 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_di_load
;
3314 if (FLOAT128_VECTOR_P (TFmode
))
3316 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_di_store
;
3317 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_di_load
;
3320 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3322 if (TARGET_NO_SDMODE_STACK
)
3324 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_di_store
;
3325 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_di_load
;
3330 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_di_store
;
3331 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_di_load
;
3334 if (TARGET_DIRECT_MOVE
&& !TARGET_DIRECT_MOVE_128
)
3336 reg_addr
[TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxti
;
3337 reg_addr
[V1TImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv1ti
;
3338 reg_addr
[V2DFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2df
;
3339 reg_addr
[V2DImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv2di
;
3340 reg_addr
[V4SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4sf
;
3341 reg_addr
[V4SImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv4si
;
3342 reg_addr
[V8HImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv8hi
;
3343 reg_addr
[V16QImode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxv16qi
;
3344 reg_addr
[SFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxsf
;
3346 reg_addr
[TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprti
;
3347 reg_addr
[V1TImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv1ti
;
3348 reg_addr
[V2DFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2df
;
3349 reg_addr
[V2DImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv2di
;
3350 reg_addr
[V4SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4sf
;
3351 reg_addr
[V4SImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv4si
;
3352 reg_addr
[V8HImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv8hi
;
3353 reg_addr
[V16QImode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprv16qi
;
3354 reg_addr
[SFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprsf
;
3356 if (FLOAT128_VECTOR_P (KFmode
))
3358 reg_addr
[KFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxkf
;
3359 reg_addr
[KFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprkf
;
3362 if (FLOAT128_VECTOR_P (TFmode
))
3364 reg_addr
[TFmode
].reload_gpr_vsx
= CODE_FOR_reload_gpr_from_vsxtf
;
3365 reg_addr
[TFmode
].reload_vsx_gpr
= CODE_FOR_reload_vsx_from_gprtf
;
3371 reg_addr
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_si_store
;
3372 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_si_load
;
3373 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_si_store
;
3374 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_si_load
;
3375 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_si_store
;
3376 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_si_load
;
3377 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_si_store
;
3378 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_si_load
;
3379 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_si_store
;
3380 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_si_load
;
3381 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_si_store
;
3382 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_si_load
;
3383 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_si_store
;
3384 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_si_load
;
3385 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_si_store
;
3386 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_si_load
;
3387 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_si_store
;
3388 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_si_load
;
3389 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_si_store
;
3390 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_si_load
;
3392 if (FLOAT128_VECTOR_P (KFmode
))
3394 reg_addr
[KFmode
].reload_store
= CODE_FOR_reload_kf_si_store
;
3395 reg_addr
[KFmode
].reload_load
= CODE_FOR_reload_kf_si_load
;
3398 if (FLOAT128_IEEE_P (TFmode
))
3400 reg_addr
[TFmode
].reload_store
= CODE_FOR_reload_tf_si_store
;
3401 reg_addr
[TFmode
].reload_load
= CODE_FOR_reload_tf_si_load
;
3404 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3406 if (TARGET_NO_SDMODE_STACK
)
3408 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_si_store
;
3409 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_si_load
;
3414 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_si_store
;
3415 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_si_load
;
3418 if (TARGET_DIRECT_MOVE
)
3420 reg_addr
[DImode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdi
;
3421 reg_addr
[DDmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdd
;
3422 reg_addr
[DFmode
].reload_fpr_gpr
= CODE_FOR_reload_fpr_from_gprdf
;
3426 reg_addr
[DFmode
].scalar_in_vmx_p
= true;
3427 reg_addr
[DImode
].scalar_in_vmx_p
= true;
3429 if (TARGET_P8_VECTOR
)
3431 reg_addr
[SFmode
].scalar_in_vmx_p
= true;
3432 reg_addr
[SImode
].scalar_in_vmx_p
= true;
3434 if (TARGET_P9_VECTOR
)
3436 reg_addr
[HImode
].scalar_in_vmx_p
= true;
3437 reg_addr
[QImode
].scalar_in_vmx_p
= true;
3442 /* Setup the fusion operations. */
3443 if (TARGET_P8_FUSION
)
3445 reg_addr
[QImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_qi
;
3446 reg_addr
[HImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_hi
;
3447 reg_addr
[SImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_si
;
3449 reg_addr
[DImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_di
;
3452 if (TARGET_P9_FUSION
)
3455 enum machine_mode mode
; /* mode of the fused type. */
3456 enum machine_mode pmode
; /* pointer mode. */
3457 enum rs6000_reload_reg_type rtype
; /* register type. */
3458 enum insn_code load
; /* load insn. */
3459 enum insn_code store
; /* store insn. */
3462 static const struct fuse_insns addis_insns
[] = {
3463 { SFmode
, DImode
, RELOAD_REG_FPR
,
3464 CODE_FOR_fusion_vsx_di_sf_load
,
3465 CODE_FOR_fusion_vsx_di_sf_store
},
3467 { SFmode
, SImode
, RELOAD_REG_FPR
,
3468 CODE_FOR_fusion_vsx_si_sf_load
,
3469 CODE_FOR_fusion_vsx_si_sf_store
},
3471 { DFmode
, DImode
, RELOAD_REG_FPR
,
3472 CODE_FOR_fusion_vsx_di_df_load
,
3473 CODE_FOR_fusion_vsx_di_df_store
},
3475 { DFmode
, SImode
, RELOAD_REG_FPR
,
3476 CODE_FOR_fusion_vsx_si_df_load
,
3477 CODE_FOR_fusion_vsx_si_df_store
},
3479 { DImode
, DImode
, RELOAD_REG_FPR
,
3480 CODE_FOR_fusion_vsx_di_di_load
,
3481 CODE_FOR_fusion_vsx_di_di_store
},
3483 { DImode
, SImode
, RELOAD_REG_FPR
,
3484 CODE_FOR_fusion_vsx_si_di_load
,
3485 CODE_FOR_fusion_vsx_si_di_store
},
3487 { QImode
, DImode
, RELOAD_REG_GPR
,
3488 CODE_FOR_fusion_gpr_di_qi_load
,
3489 CODE_FOR_fusion_gpr_di_qi_store
},
3491 { QImode
, SImode
, RELOAD_REG_GPR
,
3492 CODE_FOR_fusion_gpr_si_qi_load
,
3493 CODE_FOR_fusion_gpr_si_qi_store
},
3495 { HImode
, DImode
, RELOAD_REG_GPR
,
3496 CODE_FOR_fusion_gpr_di_hi_load
,
3497 CODE_FOR_fusion_gpr_di_hi_store
},
3499 { HImode
, SImode
, RELOAD_REG_GPR
,
3500 CODE_FOR_fusion_gpr_si_hi_load
,
3501 CODE_FOR_fusion_gpr_si_hi_store
},
3503 { SImode
, DImode
, RELOAD_REG_GPR
,
3504 CODE_FOR_fusion_gpr_di_si_load
,
3505 CODE_FOR_fusion_gpr_di_si_store
},
3507 { SImode
, SImode
, RELOAD_REG_GPR
,
3508 CODE_FOR_fusion_gpr_si_si_load
,
3509 CODE_FOR_fusion_gpr_si_si_store
},
3511 { SFmode
, DImode
, RELOAD_REG_GPR
,
3512 CODE_FOR_fusion_gpr_di_sf_load
,
3513 CODE_FOR_fusion_gpr_di_sf_store
},
3515 { SFmode
, SImode
, RELOAD_REG_GPR
,
3516 CODE_FOR_fusion_gpr_si_sf_load
,
3517 CODE_FOR_fusion_gpr_si_sf_store
},
3519 { DImode
, DImode
, RELOAD_REG_GPR
,
3520 CODE_FOR_fusion_gpr_di_di_load
,
3521 CODE_FOR_fusion_gpr_di_di_store
},
3523 { DFmode
, DImode
, RELOAD_REG_GPR
,
3524 CODE_FOR_fusion_gpr_di_df_load
,
3525 CODE_FOR_fusion_gpr_di_df_store
},
3528 machine_mode cur_pmode
= Pmode
;
3531 for (i
= 0; i
< ARRAY_SIZE (addis_insns
); i
++)
3533 machine_mode xmode
= addis_insns
[i
].mode
;
3534 enum rs6000_reload_reg_type rtype
= addis_insns
[i
].rtype
;
3536 if (addis_insns
[i
].pmode
!= cur_pmode
)
3539 if (rtype
== RELOAD_REG_FPR
&& !TARGET_HARD_FLOAT
)
3542 reg_addr
[xmode
].fusion_addis_ld
[rtype
] = addis_insns
[i
].load
;
3543 reg_addr
[xmode
].fusion_addis_st
[rtype
] = addis_insns
[i
].store
;
3545 if (rtype
== RELOAD_REG_FPR
&& TARGET_P9_VECTOR
)
3547 reg_addr
[xmode
].fusion_addis_ld
[RELOAD_REG_VMX
]
3548 = addis_insns
[i
].load
;
3549 reg_addr
[xmode
].fusion_addis_st
[RELOAD_REG_VMX
]
3550 = addis_insns
[i
].store
;
3555 /* Note which types we support fusing TOC setup plus memory insn. We only do
3556 fused TOCs for medium/large code models. */
3557 if (TARGET_P8_FUSION
&& TARGET_TOC_FUSION
&& TARGET_POWERPC64
3558 && (TARGET_CMODEL
!= CMODEL_SMALL
))
3560 reg_addr
[QImode
].fused_toc
= true;
3561 reg_addr
[HImode
].fused_toc
= true;
3562 reg_addr
[SImode
].fused_toc
= true;
3563 reg_addr
[DImode
].fused_toc
= true;
3564 if (TARGET_HARD_FLOAT
)
3566 if (TARGET_SINGLE_FLOAT
)
3567 reg_addr
[SFmode
].fused_toc
= true;
3568 if (TARGET_DOUBLE_FLOAT
)
3569 reg_addr
[DFmode
].fused_toc
= true;
3573 /* Precalculate HARD_REGNO_NREGS. */
3574 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3575 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3576 rs6000_hard_regno_nregs
[m
][r
]
3577 = rs6000_hard_regno_nregs_internal (r
, (machine_mode
)m
);
3579 /* Precalculate HARD_REGNO_MODE_OK. */
3580 for (r
= 0; r
< FIRST_PSEUDO_REGISTER
; ++r
)
3581 for (m
= 0; m
< NUM_MACHINE_MODES
; ++m
)
3582 if (rs6000_hard_regno_mode_ok (r
, (machine_mode
)m
))
3583 rs6000_hard_regno_mode_ok_p
[m
][r
] = true;
  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	{
	  machine_mode m2 = (machine_mode)m;
	  int reg_size2 = reg_size;

	  /* TDmode & IBM 128-bit floating point always takes 2 registers,
	     even though the vector/scalar registers are 128 bits wide.  */
	  if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
	    reg_size2 = UNITS_PER_FP_WORD;

	  rs6000_class_max_nregs[m][c]
	    = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
	}
    }
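  /* For example, a TDmode or IBM 128-bit floating point value in a VSX class
     keeps reg_size2 = UNITS_PER_FP_WORD = 8, so its 16 bytes need
     (16 + 8 - 1) / 8 = 2 registers rather than one 128-bit VSX register.  */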
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines double precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));

  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
		 "-ffast-math");
      if (flag_trapping_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip",
		 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
		 "-ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }
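  /* rs6000_recip_bits[mode] now records, per mode, which reciprocal estimate
     instructions exist (the HAVE_* bits) and which ones -mrecip has asked to
     be used automatically (the AUTO_* bits); e.g. on a machine with frsqrtes,
     an -mrecip request for SFmode rsqrt under -ffast-math leaves both the
     HAVE_RSQRTE and AUTO_RSQRTE bits set for SFmode.  */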
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost       = %d\n"
		 "SImode constant mult cost       = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost      = %d\n"
		 "SImode division cost            = %d\n"
		 "DImode division cost            = %d\n"
		 "Simple fp operation cost        = %d\n"
		 "DFmode multiplication cost      = %d\n"
		 "SFmode division cost            = %d\n"
		 "DFmode division cost            = %d\n"
		 "cache line size                 = %d\n"
		 "l1 cache size                   = %d\n"
		 "l2 cache size                   = %d\n"
		 "simultaneous prefetches         = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */

static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     Altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif

/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like PAIRED are no longer in target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		    ? RS6000_BTM_ALTIVEC    : 0)
	  | ((TARGET_CMPB)		    ? RS6000_BTM_CMPB	    : 0)
	  | ((TARGET_VSX)		    ? RS6000_BTM_VSX	    : 0)
	  | ((TARGET_PAIRED_FLOAT)	    ? RS6000_BTM_PAIRED	    : 0)
	  | ((TARGET_FRE)		    ? RS6000_BTM_FRE	    : 0)
	  | ((TARGET_FRES)		    ? RS6000_BTM_FRES	    : 0)
	  | ((TARGET_FRSQRTE)		    ? RS6000_BTM_FRSQRTE    : 0)
	  | ((TARGET_FRSQRTES)		    ? RS6000_BTM_FRSQRTES   : 0)
	  | ((TARGET_POPCNTD)		    ? RS6000_BTM_POPCNTD    : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL	    : 0)
	  | ((TARGET_P8_VECTOR)		    ? RS6000_BTM_P8_VECTOR  : 0)
	  | ((TARGET_P9_VECTOR)		    ? RS6000_BTM_P9_VECTOR  : 0)
	  | ((TARGET_P9_MISC)		    ? RS6000_BTM_P9_MISC    : 0)
	  | ((TARGET_MODULO)		    ? RS6000_BTM_MODULO	    : 0)
	  | ((TARGET_64BIT)		    ? RS6000_BTM_64BIT	    : 0)
	  | ((TARGET_CRYPTO)		    ? RS6000_BTM_CRYPTO	    : 0)
	  | ((TARGET_HTM)		    ? RS6000_BTM_HTM	    : 0)
	  | ((TARGET_DFP)		    ? RS6000_BTM_DFP	    : 0)
	  | ((TARGET_HARD_FLOAT)	    ? RS6000_BTM_HARD_FLOAT : 0)
	  | ((TARGET_LONG_DOUBLE_128)	    ? RS6000_BTM_LDBL128    : 0)
	  | ((TARGET_FLOAT128_TYPE)	    ? RS6000_BTM_FLOAT128   : 0));
}
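/* The mask computed above is compared against the mask recorded for each
   built-in function when it was registered, so a builtin tagged with, say,
   RS6000_BTM_P9_VECTOR is only accepted for expansion when -mpower9-vector
   (or a cpu that implies it) is in effect for the current function.  */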
/* Implement TARGET_MD_ASM_ADJUST.  All asm statements are considered
   to clobber the XER[CA] bit because clobbering that bit without telling
   the compiler worked just fine with versions of GCC before GCC 5, and
   breaking a lot of older code in ways that are hard to track down is
   not such a great idea.  */

static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		      vec<const char *> &/*constraints*/,
		      vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
  SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
  return NULL;
}
/* Override command line options.

   Combine build-specific configuration information with options
   specified on the command line to set various state variables which
   influence code generation, optimization, and expansion of built-in
   functions.  Assure that command-line configuration preferences are
   compatible with each other and with the build configuration; issue
   warnings while adjusting configuration or error messages while
   rejecting configuration.

   Upon entry to this function:

     This function is called once at the beginning of
     compilation, and then again at the start and end of compiling
     each section of code that has a different configuration, as
     indicated, for example, by adding the

       __attribute__((__target__("cpu=power9")))

     qualifier to a function definition or, for example, by bracketing
     code between

       #pragma GCC target("altivec")

     and

       #pragma GCC reset_options

     directives.  Parameter global_init_p is true for the initial
     invocation, which initializes global variables, and false for all
     subsequent invocations.

     Various global state information is assumed to be valid.  This
     includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
     default CPU specified at build configure time, TARGET_DEFAULT,
     representing the default set of option flags for the default
     target, and global_options_set.x_rs6000_isa_flags, representing
     which options were requested on the command line.

   Upon return from this function:

     rs6000_isa_flags_explicit has a non-zero bit for each flag that
     was set by name on the command line.  Additionally, if certain
     attributes are automatically enabled or disabled by this function
     in order to assure compatibility between options and
     configuration, the flags associated with those attributes are
     also set.  By setting these "explicit bits", we avoid the risk
     that other code might accidentally overwrite these particular
     attributes with "default values".

     The various bits of rs6000_isa_flags are set to indicate the
     target options that have been selected for the most current
     compilation efforts.  This has the effect of also turning on the
     associated TARGET_XXX values since these are macros which are
     generally defined to test the corresponding bit of the
     rs6000_isa_flags variable.

     The variable rs6000_builtin_mask is set to represent the target
     options for the most current compilation efforts, consistent with
     the current contents of rs6000_isa_flags.  This variable controls
     expansion of built-in functions.

     Various other global variables and fields of global structures
     (over 50 in all) are initialized to reflect the desired options
     for the most current compilation efforts.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  HOST_WIDE_INT set_masks;
  HOST_WIDE_INT ignore_masks;
  int cpu_index, tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Print defaults.  */
  if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
    rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
  /* Remember the explicit arguments.  */
  rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "%qs is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries",
	     "-malign-power");

  /* Numerous experiments show that IRA based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;

  /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
     for tracebacks to be complete but not if any -fasynchronous-unwind-tables
     options were already specified.  */
  if (flag_sanitize & SANITIZE_USER_ADDRESS
      && !global_options_set.x_flag_asynchronous_unwind_tables)
    flag_asynchronous_unwind_tables = 1;
  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }

  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
		   | OTHER_VSX_VECTOR_MASKS);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else if (implicit_cpu)
    {
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
      have_cpu = true;
    }
  else
    {
      /* PowerPC 64-bit LE requires at least ISA 2.07.  */
      const char *default_cpu = ((!TARGET_POWERPC64)
				 ? "powerpc"
				 : ((BYTES_BIG_ENDIAN)
				    ? "powerpc64"
				    : "powerpc64le"));

      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = false;
    }

  gcc_assert (cpu_index >= 0);
  if (have_cpu)
    {
#ifndef HAVE_AS_POWER9
      if (processor_target_table[rs6000_cpu_index].processor
	  == PROCESSOR_POWER9)
	{
	  have_cpu = false;
	  warning (0, "will not generate power9 instructions because "
		   "assembler lacks power9 support");
	}
#endif
#ifndef HAVE_AS_POWER8
      if (processor_target_table[rs6000_cpu_index].processor
	  == PROCESSOR_POWER8)
	{
	  have_cpu = false;
	  warning (0, "will not generate power8 instructions because "
		   "assembler lacks power8 support");
	}
#endif
#ifndef HAVE_AS_POPCNTD
      if (processor_target_table[rs6000_cpu_index].processor
	  == PROCESSOR_POWER7)
	{
	  have_cpu = false;
	  warning (0, "will not generate power7 instructions because "
		   "assembler lacks power7 support");
	}
#endif
#ifndef HAVE_AS_DFP
      if (processor_target_table[rs6000_cpu_index].processor
	  == PROCESSOR_POWER6)
	{
	  have_cpu = false;
	  warning (0, "will not generate power6 instructions because "
		   "assembler lacks power6 support");
	}
#endif
#ifndef HAVE_AS_POPCNTB
      if (processor_target_table[rs6000_cpu_index].processor
	  == PROCESSOR_POWER5)
	{
	  have_cpu = false;
	  warning (0, "will not generate power5 instructions because "
		   "assembler lacks power5 support");
	}
#endif

      if (!have_cpu)
	{
	  /* PowerPC 64-bit LE requires at least ISA 2.07.  */
	  const char *default_cpu = (!TARGET_POWERPC64
				     ? "powerpc"
				     : (BYTES_BIG_ENDIAN
					? "powerpc64"
					: "powerpc64le"));

	  rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
	}
    }
  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (have_cpu)
    {
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			   & set_masks);
    }
  else
    {
      /* If no -mcpu=<xxx>, inherit any default options that were cleared via
	 POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
	 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we
	 switched to using rs6000_isa_flags, we need to do the initialization
	 here.

	 If there is a TARGET_DEFAULT, use that.  Otherwise fall back to using
	 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults.  */
      HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
			     : processor_target_table[cpu_index].target_enable);
      rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
    }

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    rs6000_tune_index = tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;
  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
    }

  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
						      | OPTION_MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mmultiple");
	}

      if (TARGET_STRING)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_STRING;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
	    warning (0, "%qs is not supported on little endian systems",
		     "-mstring");
	}
    }
  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;

  /* -maltivec={le,be} implies -maltivec.  */
  if (rs6000_altivec_element_order != 0)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Disallow -maltivec=le in big endian mode for now.  This is not
     known to be useful for anyone.  */
  if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
    {
      warning (0, N_("-maltivec=le not allowed for big-endian targets"));
      rs6000_altivec_element_order = 0;
    }

  if (!rs6000_fold_gimple)
    fprintf (stderr,
	     "gimple folding of rs6000 builtins has been disabled.\n");
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;

      if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    {
	      rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	      rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	    }
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
				   & OPTION_MASK_ALTIVEC))
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }
  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
				       | OPTION_MASK_ALTIVEC
				       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
			   | OPTION_MASK_DIRECT_MOVE)
			  & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);

  /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
     off all of the options that depend on those flags.  */
  ignore_masks = rs6000_disable_incompatible_switches ();
  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
    rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);

  else if (TARGET_P9_MINMAX)
    {
      if (have_cpu)
	{
	  if (cpu_index == PROCESSOR_POWER9)
	    {
	      /* legacy behavior: allow -mcpu=power9 with certain
		 capabilities explicitly disabled.  */
	      rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
	    }
	  else
	    error ("power9 target option is incompatible with %<%s=<xxx>%> "
		   "for <xxx> less than power9", "-mcpu");
	}
      else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
	       != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
		   & rs6000_isa_flags_explicit))
	{
	  /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
	     were explicitly cleared.  */
	  error ("%qs incompatible with explicitly disabled options",
		 "-mpower9-minmax");
	}
      else
	rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
    }

  else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
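  /* The ISA_*_MASKS_* macros are defined cumulatively in rs6000.h, so e.g.
     enabling -mpower9-vector above pulls in ISA_3_0_MASKS_SERVER, which
     already contains the ISA 2.07 and earlier server masks; masking with
     ~ignore_masks keeps anything the user explicitly disabled from being
     turned back on.  */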
  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
	error ("%qs requires %qs", "-mcrypto", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
	error ("%qs requires %qs", "-mdirect-move", "-mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	  && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
	error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
	     not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
	error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
	warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
			    | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store are disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory is not available in little endian "
		       "mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume if the user asked for normal quad memory instructions, they want
     the atomic versions as well, unless they explicitly told us not to use
     quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P8_FUSION);

  /* Setting additional fusion flags turns on base fusion.  */
  if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  if (TARGET_P8_FUSION_SIGN)
	    error ("%qs requires %qs", "-mpower8-fusion-sign",
		   "-mpower8-fusion");

	  if (TARGET_TOC_FUSION)
	    error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");

	  rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Power9 fusion is a superset over power8 fusion.  */
  if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
	{
	  /* We prefer to not mention undocumented options in
	     error messages.  However, if users have managed to select
	     power9-fusion without selecting power8-fusion, they
	     already know about undocumented flags.  */
	  error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
	  rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
	}
      else
	rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Enable power9 fusion if we are tuning for power9, even if we aren't
     generating power9 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P9_FUSION);

  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
  /* TOC fusion requires 64-bit and medium/large code model.  */
  if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires 64-bit"));
    }

  if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
	warning (0, N_("-mtoc-fusion requires medium/large code model"));
    }

  /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
     model are in use.  */
  if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL)
      && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
    rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
  /* ISA 3.0 vector instructions include ISA 2.07.  */
  if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
    {
      /* We prefer to not mention undocumented options in
	 error messages.  However, if users have managed to select
	 power9-vector without selecting power8-vector, they
	 already know about undocumented flags.  */
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
	  (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
	error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
	  if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	    rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
	}
      else
	{
	  /* OPTION_MASK_P9_VECTOR is explicit and
	     OPTION_MASK_P8_VECTOR is not explicit.  */
	  rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
	  rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
	}
    }
  /* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
     support.  If we only have ISA 2.06 support, and the user did not specify
     the switch, leave it set to -1 so the movmisalign patterns are enabled,
     but we don't enable the full vectorization support.  */
  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
    TARGET_ALLOW_MOVMISALIGN = 1;

  else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
    {
      if (TARGET_ALLOW_MOVMISALIGN > 0
	  && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
	error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");

      TARGET_ALLOW_MOVMISALIGN = 0;
    }

  /* Determine when unaligned vector accesses are permitted, and when
     they are preferred over masked Altivec loads.  Note that if
     TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
     TARGET_EFFICIENT_UNALIGNED_VSX must be as well.  The converse is
     not true.  */
  if (TARGET_EFFICIENT_UNALIGNED_VSX)
    {
      if (!TARGET_VSX)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}

      else if (!TARGET_ALLOW_MOVMISALIGN)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
	    error ("%qs requires %qs", "-munefficient-unaligned-vsx",
		   "-mallow-movmisalign");

	  rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
	}
    }
  /* Set long double size before the IEEE 128-bit tests.  */
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

  /* Set -mabi=ieeelongdouble on some old targets.  Note, AIX and Darwin
     explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
     pick up this default.  */
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Enable the default support for IEEE 128-bit floating point on Linux VSX
     systems, but don't enable the __float128 keyword.  */
  if (TARGET_VSX && TARGET_LONG_DOUBLE_128
      && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
      && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
  /* IEEE 128-bit floating point requires VSX support.  */
  if (!TARGET_VSX)
    {
      if (TARGET_FLOAT128_KEYWORD)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
	    error ("%qs requires VSX support", "-mfloat128");

	  rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				| OPTION_MASK_FLOAT128_KEYWORD
				| OPTION_MASK_FLOAT128_HW);
	}

      else if (TARGET_FLOAT128_TYPE)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
	    error ("%qs requires VSX support", "-mfloat128-type");

	  rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				| OPTION_MASK_FLOAT128_KEYWORD
				| OPTION_MASK_FLOAT128_HW);
	}
    }

  /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
     128-bit floating point support to be enabled.  */
  if (!TARGET_FLOAT128_TYPE)
    {
      if (TARGET_FLOAT128_KEYWORD)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
	    {
	      error ("%qs requires %qs", "-mfloat128", "-mfloat128-type");
	      rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				    | OPTION_MASK_FLOAT128_KEYWORD
				    | OPTION_MASK_FLOAT128_HW);
	    }
	  else
	    rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
	}

      if (TARGET_FLOAT128_HW)
	{
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	    {
	      error ("%qs requires %qs", "-mfloat128-hardware",
		     "-mfloat128-type");
	      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
	    }
	  else
	    rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
				  | OPTION_MASK_FLOAT128_KEYWORD
				  | OPTION_MASK_FLOAT128_HW);
	}
    }
  /* If we have -mfloat128-type and full ISA 3.0 support, enable
     -mfloat128-hardware by default.  However, don't enable the __float128
     keyword.  If the user explicitly turned on -mfloat128-hardware, enable the
     -mfloat128 option as well if it was not already set.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
      && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;

  if (TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_64BIT)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
	error ("%qs requires %qs", "-mfloat128-hardware", "-m64");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
  /* Print the options after updating the defaults.  */
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
	  || rs6000_cpu == PROCESSOR_PPCE500MC64
	  || rs6000_cpu == PROCESSOR_PPCE5500
	  || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
	= rs6000_debug_secondary_reload_class;
      rs6000_secondary_memory_needed_ptr
	= rs6000_debug_secondary_memory_needed;
      rs6000_cannot_change_mode_class_ptr
	= rs6000_debug_cannot_change_mode_class;
      rs6000_preferred_reload_class_ptr
	= rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
	= rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
	= rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
	{
	  error ("unknown vectorization library ABI type (%qs) for "
		 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
	  ret = false;
	}
    }
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
			   | OPTION_MASK_FLOAT128_TYPE
			   | OPTION_MASK_FLOAT128_KEYWORD)
			  & ~rs6000_isa_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
	  && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
	{
	  if (main_target_opt != NULL &&
	      !main_target_opt->x_rs6000_altivec_abi)
	    error ("target attribute or pragma changes AltiVec ABI");
	  else
	    rs6000_altivec_abi = 1;
	}
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:
      rs6000_single_float = 0;
      rs6000_double_float = 0;
      rs6000_isa_flags &= ~OPTION_MASK_STRING;
      break;

    default:
      if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
	rs6000_isa_flags &= ~OPTION_MASK_ISEL;
      break;
    }
  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
	error ("target attribute or pragma changes single precision floating "
	       "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
	error ("target attribute or pragma changes double precision floating "
	       "point");
    }

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_POWER7
			&& rs6000_cpu != PROCESSOR_POWER8
			&& rs6000_cpu != PROCESSOR_POWER9
			&& rs6000_cpu != PROCESSOR_PPCA2
			&& rs6000_cpu != PROCESSOR_CELL
			&& rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5
			 || rs6000_cpu == PROCESSOR_POWER7
			 || rs6000_cpu == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6
				 || rs6000_cpu == PROCESSOR_POWER7
				 || rs6000_cpu == PROCESSOR_POWER8
				 || rs6000_cpu == PROCESSOR_POWER9
				 || rs6000_cpu == PROCESSOR_PPCE500MC
				 || rs6000_cpu == PROCESSOR_PPCE500MC64
				 || rs6000_cpu == PROCESSOR_PPCE5500
				 || rs6000_cpu == PROCESSOR_PPCE6500);
  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);

  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }
  /* Handle stack protector.  */
  if (!global_options_set.x_rs6000_stack_protector_guard)
#ifdef TARGET_THREAD_SSP_OFFSET
    rs6000_stack_protector_guard = SSP_TLS;
#else
    rs6000_stack_protector_guard = SSP_GLOBAL;
#endif

#ifdef TARGET_THREAD_SSP_OFFSET
  rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
  rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
#endif

  if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = rs6000_stack_protector_guard_offset_str;

      errno = 0;
      long offset = strtol (str, &endp, 0);
      if (!*str || *endp || errno)
	error ("%qs is not a valid number in %qs", str,
	       "-mstack-protector-guard-offset=");

      if (!IN_RANGE (offset, -0x8000, 0x7fff)
	  || (TARGET_64BIT && (offset & 3)))
	error ("%qs is not a valid offset in %qs", str,
	       "-mstack-protector-guard-offset=");

      rs6000_stack_protector_guard_offset = offset;
    }

  if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
    {
      const char *str = rs6000_stack_protector_guard_reg_str;
      int reg = decode_reg_name (str);

      if (!IN_RANGE (reg, 1, 31))
	error ("%qs is not a valid base register in %qs", str,
	       "-mstack-protector-guard-reg=");

      rs6000_stack_protector_guard_reg = reg;
    }

  if (rs6000_stack_protector_guard == SSP_TLS
      && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
    error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
	 aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
	  || rs6000_cpu == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    {
	      can_override_loop_align = 1;
	      align_loops = 16;
	    }
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }
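  /* For the Power4 and later processors flagged by rs6000_align_branch_targets
     above, functions, jump targets and loops therefore default to 16-byte
     alignment, and can_override_loop_align additionally lets rs6000_loop_align
     (further below) raise the alignment of small hot loops to 32 bytes.  */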
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* The AIX and ELFv1 ABIs define standard function descriptors.  */
  if (DEFAULT_ABI == ABI_AIX)
    targetm.calls.custom_function_descriptors = 0;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC476:
	rs6000_cost = &ppc476_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;

      case PROCESSOR_PPCE500MC64:
	rs6000_cost = &ppce500mc64_cost;
	break;

      case PROCESSOR_PPCE5500:
	rs6000_cost = &ppce5500_cost;
	break;

      case PROCESSOR_PPCE6500:
	rs6000_cost = &ppce6500_cost;
	break;

      case PROCESSOR_TITAN:
	rs6000_cost = &titan_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      case PROCESSOR_POWER7:
	rs6000_cost = &power7_cost;
	break;

      case PROCESSOR_POWER8:
	rs6000_cost = &power8_cost;
	break;

      case PROCESSOR_POWER9:
	rs6000_cost = &power9_cost;
	break;

      case PROCESSOR_PPCA2:
	rs6000_cost = &ppca2_cost;
	break;

      default:
	gcc_unreachable ();
      }
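  /* The selected cost table drives the rtx cost hooks and also seeds the
     cache-geometry and prefetch --param defaults set immediately below.  */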
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
			 rs6000_cost->simultaneous_prefetches,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
			 rs6000_cost->cache_line_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Increase loop peeling limits based on performance analysis.  */
  maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
			 global_options.x_param_values,
			 global_options_set.x_param_values);
  maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* Use the 'model' -fsched-pressure algorithm by default.  */
  maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
			 SCHED_PRESSURE_MODEL,
			 global_options.x_param_values,
			 global_options_set.x_param_values);

  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;
  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  A value of -1 indicates that the
     initial value of this variable has not been overwritten.  During
     compilation, TARGET_AVOID_XFORM is either 0 or 1.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the
       element being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
			  && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  if (*q == '!')
	    {
	      invert = true;
	      q++;
	    }
	  else
	    invert = false;

	  if (!strcmp (q, "default"))
	    mask = ((TARGET_RECIP_PRECISION)
		    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
	  else
	    {
	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
		if (!strcmp (q, recip_options[i].string))
		  {
		    mask = recip_options[i].mask;
		    break;
		  }

	      if (i == ARRAY_SIZE (recip_options))
		{
		  error ("unknown option for %<%s=%s%>", "-mrecip", q);
		  invert = false;
		  mask = 0;
		  ret = false;
		}
	    }

	  if (invert)
	    rs6000_recip_control &= ~mask;
	  else
	    rs6000_recip_control |= mask;
	}
    }
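  /* So, for instance, an -mrecip= string such as "rsqrt,!divd" turns the
     reciprocal square root estimate bits on while keeping the DFmode divide
     estimate disabled; the '!' prefix handled above is what selects between
     clearing and setting each mask.  */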
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like PAIRED are no longer in target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    rs6000_print_builtin_options (stderr, 0, "builtin mask",
				  rs6000_builtin_mask);

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  /* Don't use lvsl/vperm for P8 and similarly efficient machines.  */
  if ((TARGET_ALTIVEC && !TARGET_VSX)
      || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
static int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise
     return the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
	  || rs6000_cpu == PROCESSOR_POWER5
	  || rs6000_cpu == PROCESSOR_POWER6
	  || rs6000_cpu == PROCESSOR_POWER7
	  || rs6000_cpu == PROCESSOR_POWER8
	  || rs6000_cpu == PROCESSOR_POWER9))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
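/* With the small-loop case above returning an alignment of 2**5 = 32 bytes,
   the maximum number of padding bytes allowed for such a loop is
   (1 << 5) - 1 = 31; otherwise the -falign-loops setting governs.  */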
/* Return true iff, data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
	return true;

      if (is_packed)
	return false;

      return true;
    }
  else
    {
      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
	return true;

      if (is_packed)
	return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (machine_mode mode,
					    const_tree type,
					    int misalignment,
					    bool is_packed)
{
  if (TARGET_VSX)
    {
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return true;

      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
	return false;

      if (misalignment == -1)
	{
	  /* Misalignment factor is unknown at compile time but we know
	     it's word aligned.  */
	  if (rs6000_vector_alignment_reachable (type, is_packed))
	    {
	      int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

	      if (element_size == 64 || element_size == 32)
		return true;
	    }

	  return false;
	}

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
	return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
      return 1;

    case vec_perm:
      if (TARGET_VSX)
	return 3;
      else
	return 1;

    case vec_promote_demote:
      if (TARGET_VSX)
	return 4;
      else
	return 1;

    case cond_branch_taken:
      return 3;

    case unaligned_load:
      if (TARGET_P9_VECTOR)
	return 3;

      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 22;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned loads are not supported.  */
	gcc_unreachable ();

      return 2;

    case unaligned_store:
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
	return 1;

      if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	{
	  elements = TYPE_VECTOR_SUBPARTS (vectype);
	  if (elements == 2)
	    /* Double word aligned.  */
	    return 2;

	  if (elements == 4)
	    {
	      switch (misalign)
		{
		case 8:
		  /* Double word aligned.  */
		  return 2;

		case -1:
		  /* Unknown misalignment.  */
		case 4:
		case 12:
		  /* Word aligned.  */
		  return 23;

		default:
		  gcc_unreachable ();
		}
	    }
	}

      if (TARGET_ALTIVEC)
	/* Misaligned stores are not supported.  */
	gcc_unreachable ();

      return 2;

    case vec_construct:
      /* This is a rough approximation assuming non-constant elements
	 constructed into a vector via element insertion.  FIXME:
	 vec_construct is not granular enough for uniformly good
	 decisions.  If the initialization is a splat, this is
	 cheaper than we estimate.  Improve this someday.  */
      elem_type = TREE_TYPE (vectype);
      /* 32-bit vectors loaded into registers are stored as double
	 precision, so we need 2 permutes, 2 converts, and 1 merge
	 to construct a vector of short floats from them.  */
      if (SCALAR_FLOAT_TYPE_P (elem_type)
	  && TYPE_PRECISION (elem_type) == 32)
	return 5;
      /* On POWER9, integer vector types are built up in GPRs and then
	 use a direct move (2 cycles).  For POWER8 this is even worse,
	 as we need two direct moves and a merge, and the direct moves
	 take 5 cycles.  */
      else if (INTEGRAL_TYPE_P (elem_type))
	{
	  if (TARGET_P9_VECTOR)
	    return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
	  else
	    return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
	}
      else
	/* V2DFmode doesn't need a direct move.  */
	return 2;

    default:
      gcc_unreachable ();
    }
}
5528 /* Implement targetm.vectorize.preferred_simd_mode. */
5531 rs6000_preferred_simd_mode (machine_mode mode
)
5540 if (TARGET_ALTIVEC
|| TARGET_VSX
)
5557 if (TARGET_PAIRED_FLOAT
5563 typedef struct _rs6000_cost_data
5565 struct loop
*loop_info
;
5569 /* Test for likely overcommitment of vector hardware resources. If a
5570 loop iteration is relatively large, and too large a percentage of
5571 instructions in the loop are vectorized, the cost model may not
5572 adequately reflect delays from unavailable vector resources.
5573 Penalize the loop body cost for this case. */
5576 rs6000_density_test (rs6000_cost_data
*data
)
5578 const int DENSITY_PCT_THRESHOLD
= 85;
5579 const int DENSITY_SIZE_THRESHOLD
= 70;
5580 const int DENSITY_PENALTY
= 10;
5581 struct loop
*loop
= data
->loop_info
;
5582 basic_block
*bbs
= get_loop_body (loop
);
5583 int nbbs
= loop
->num_nodes
;
5584 int vec_cost
= data
->cost
[vect_body
], not_vec_cost
= 0;
5587 for (i
= 0; i
< nbbs
; i
++)
5589 basic_block bb
= bbs
[i
];
5590 gimple_stmt_iterator gsi
;
5592 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
5594 gimple
*stmt
= gsi_stmt (gsi
);
5595 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5597 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
5598 && !STMT_VINFO_IN_PATTERN_P (stmt_info
))
5604 density_pct
= (vec_cost
* 100) / (vec_cost
+ not_vec_cost
);
5606 if (density_pct
> DENSITY_PCT_THRESHOLD
5607 && vec_cost
+ not_vec_cost
> DENSITY_SIZE_THRESHOLD
)
5609 data
->cost
[vect_body
] = vec_cost
* (100 + DENSITY_PENALTY
) / 100;
5610 if (dump_enabled_p ())
5611 dump_printf_loc (MSG_NOTE
, vect_location
,
5612 "density %d%%, cost %d exceeds threshold, penalizing "
5613 "loop body cost by %d%%", density_pct
,
5614 vec_cost
+ not_vec_cost
, DENSITY_PENALTY
);
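
/* Illustrative sketch (not from this port): the density heuristic above
   restated in plain C with ordinary ints.  Once more than 85% of a
   sufficiently large loop body is vectorized, the vector body cost is
   inflated by 10%.  The function and constant names below are local to
   this example, and callers are assumed to pass vec_cost + not_vec_cost
   greater than zero.  */
static int
example_density_adjusted_body_cost (int vec_cost, int not_vec_cost)
{
  const int density_pct_threshold = 85;
  const int density_size_threshold = 70;
  const int density_penalty = 10;
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > density_pct_threshold
      && vec_cost + not_vec_cost > density_size_threshold)
    /* e.g. vec_cost = 90, not_vec_cost = 5: density is 94%, so the body
       cost becomes 90 * 110 / 100 = 99.  */
    return vec_cost * (100 + density_penalty) / 100;

  return vec_cost;
}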
5618 /* Implement targetm.vectorize.init_cost. */
5620 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5621 instruction is needed by the vectorization. */
5622 static bool rs6000_vect_nonmem
;
5625 rs6000_init_cost (struct loop
*loop_info
)
5627 rs6000_cost_data
*data
= XNEW (struct _rs6000_cost_data
);
5628 data
->loop_info
= loop_info
;
5629 data
->cost
[vect_prologue
] = 0;
5630 data
->cost
[vect_body
] = 0;
5631 data
->cost
[vect_epilogue
] = 0;
5632 rs6000_vect_nonmem
= false;
5636 /* Implement targetm.vectorize.add_stmt_cost. */
5639 rs6000_add_stmt_cost (void *data
, int count
, enum vect_cost_for_stmt kind
,
5640 struct _stmt_vec_info
*stmt_info
, int misalign
,
5641 enum vect_cost_model_location where
)
5643 rs6000_cost_data
*cost_data
= (rs6000_cost_data
*) data
;
5644 unsigned retval
= 0;
5646 if (flag_vect_cost_model
)
5648 tree vectype
= stmt_info
? stmt_vectype (stmt_info
) : NULL_TREE
;
5649 int stmt_cost
= rs6000_builtin_vectorization_cost (kind
, vectype
,
5651 /* Statements in an inner loop relative to the loop being
5652 vectorized are weighted more heavily. The value here is
5653 arbitrary and could potentially be improved with analysis. */
5654 if (where
== vect_body
&& stmt_info
&& stmt_in_inner_loop_p (stmt_info
))
5655 count
*= 50; /* FIXME. */
5657 retval
= (unsigned) (count
* stmt_cost
);
5658 cost_data
->cost
[where
] += retval
;
5660 /* Check whether we're doing something other than just a copy loop.
5661 Not all such loops may be profitably vectorized; see
5662 rs6000_finish_cost. */
5663 if ((kind
== vec_to_scalar
|| kind
== vec_perm
5664 || kind
== vec_promote_demote
|| kind
== vec_construct
5665 || kind
== scalar_to_vec
)
5666 || (where
== vect_body
&& kind
== vector_stmt
))
5667 rs6000_vect_nonmem
= true;
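
/* Illustrative sketch (not from this port): how one statement's
   contribution to the accumulated cost is computed above.  Statements in
   an inner loop relative to the loop being vectorized are weighted by the
   arbitrary factor of 50 noted in the FIXME.  Plain ints; the name is
   local to this example.  */
static unsigned
example_stmt_cost_contribution (int count, int stmt_cost, int in_inner_loop)
{
  if (in_inner_loop)
    count *= 50;	/* Heavier weight for inner-loop statements.  */
  return (unsigned) (count * stmt_cost);
}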
5673 /* Implement targetm.vectorize.finish_cost. */
5676 rs6000_finish_cost (void *data
, unsigned *prologue_cost
,
5677 unsigned *body_cost
, unsigned *epilogue_cost
)
5679 rs6000_cost_data
*cost_data
= (rs6000_cost_data
*) data
;
5681 if (cost_data
->loop_info
)
5682 rs6000_density_test (cost_data
);
5684 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5685 that require versioning for any reason. The vectorization is at
5686 best a wash inside the loop, and the versioning checks make
5687 profitability highly unlikely and potentially quite harmful. */
5688 if (cost_data
->loop_info
)
5690 loop_vec_info vec_info
= loop_vec_info_for_loop (cost_data
->loop_info
);
5691 if (!rs6000_vect_nonmem
5692 && LOOP_VINFO_VECT_FACTOR (vec_info
) == 2
5693 && LOOP_REQUIRES_VERSIONING (vec_info
))
5694 cost_data
->cost
[vect_body
] += 10000;
5697 *prologue_cost
= cost_data
->cost
[vect_prologue
];
5698 *body_cost
= cost_data
->cost
[vect_body
];
5699 *epilogue_cost
= cost_data
->cost
[vect_epilogue
];
5702 /* Implement targetm.vectorize.destroy_cost_data. */
5705 rs6000_destroy_cost_data (void *data
)
5710 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5711 library with vectorized intrinsics. */
5714 rs6000_builtin_vectorized_libmass (combined_fn fn
, tree type_out
,
5718 const char *suffix
= NULL
;
5719 tree fntype
, new_fndecl
, bdecl
= NULL_TREE
;
5722 machine_mode el_mode
, in_mode
;
5725 /* Libmass is suitable for unsafe math only as it does not correctly support
5726 parts of IEEE with the required precision such as denormals. Only support
5727 it if we have VSX to use the simd d2 or f4 functions.
5728 XXX: Add variable length support. */
5729 if (!flag_unsafe_math_optimizations
|| !TARGET_VSX
)
5732 el_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5733 n
= TYPE_VECTOR_SUBPARTS (type_out
);
5734 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5735 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5736 if (el_mode
!= in_mode
5772 if (el_mode
== DFmode
&& n
== 2)
5774 bdecl
= mathfn_built_in (double_type_node
, fn
);
5775 suffix
= "d2"; /* pow -> powd2 */
5777 else if (el_mode
== SFmode
&& n
== 4)
5779 bdecl
= mathfn_built_in (float_type_node
, fn
);
5780 suffix
= "4"; /* powf -> powf4 */
5792 gcc_assert (suffix
!= NULL
);
5793 bname
= IDENTIFIER_POINTER (DECL_NAME (bdecl
));
5797 strcpy (name
, bname
+ sizeof ("__builtin_") - 1);
5798 strcat (name
, suffix
);
5801 fntype
= build_function_type_list (type_out
, type_in
, NULL
);
5802 else if (n_args
== 2)
5803 fntype
= build_function_type_list (type_out
, type_in
, type_in
, NULL
);
5807 /* Build a function declaration for the vectorized function. */
5808 new_fndecl
= build_decl (BUILTINS_LOCATION
,
5809 FUNCTION_DECL
, get_identifier (name
), fntype
);
5810 TREE_PUBLIC (new_fndecl
) = 1;
5811 DECL_EXTERNAL (new_fndecl
) = 1;
5812 DECL_IS_NOVOPS (new_fndecl
) = 1;
5813 TREE_READONLY (new_fndecl
) = 1;
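
/* Illustrative sketch (not from this port): how the MASS routine name is
   derived from the scalar builtin name above.  The "__builtin_" prefix is
   stripped and the vector-width suffix appended, e.g. "__builtin_pow" ->
   "powd2" for V2DF and "__builtin_powf" -> "powf4" for V4SF.  The helper
   name is local to this example, and NAME is assumed large enough to hold
   the result.  */
static void
example_mass_vector_name (char *name, const char *bname, const char *suffix)
{
  /* Skip "__builtin_" (sizeof includes the NUL, hence the -1), then
     append the suffix.  */
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);
}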
5818 /* Returns a function decl for a vectorized version of the builtin function
5819 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5820 if it is not available. */
5823 rs6000_builtin_vectorized_function (unsigned int fn
, tree type_out
,
5826 machine_mode in_mode
, out_mode
;
5829 if (TARGET_DEBUG_BUILTIN
)
5830 fprintf (stderr
, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5831 combined_fn_name (combined_fn (fn
)),
5832 GET_MODE_NAME (TYPE_MODE (type_out
)),
5833 GET_MODE_NAME (TYPE_MODE (type_in
)));
5835 if (TREE_CODE (type_out
) != VECTOR_TYPE
5836 || TREE_CODE (type_in
) != VECTOR_TYPE
)
5839 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5840 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
5841 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5842 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5847 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5848 && out_mode
== DFmode
&& out_n
== 2
5849 && in_mode
== DFmode
&& in_n
== 2)
5850 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNDP
];
5851 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5852 && out_mode
== SFmode
&& out_n
== 4
5853 && in_mode
== SFmode
&& in_n
== 4)
5854 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNSP
];
5855 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5856 && out_mode
== SFmode
&& out_n
== 4
5857 && in_mode
== SFmode
&& in_n
== 4)
5858 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_COPYSIGN_V4SF
];
5861 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5862 && out_mode
== DFmode
&& out_n
== 2
5863 && in_mode
== DFmode
&& in_n
== 2)
5864 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIP
];
5865 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5866 && out_mode
== SFmode
&& out_n
== 4
5867 && in_mode
== SFmode
&& in_n
== 4)
5868 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIP
];
5869 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5870 && out_mode
== SFmode
&& out_n
== 4
5871 && in_mode
== SFmode
&& in_n
== 4)
5872 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIP
];
5875 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5876 && out_mode
== DFmode
&& out_n
== 2
5877 && in_mode
== DFmode
&& in_n
== 2)
5878 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIM
];
5879 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5880 && out_mode
== SFmode
&& out_n
== 4
5881 && in_mode
== SFmode
&& in_n
== 4)
5882 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIM
];
5883 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5884 && out_mode
== SFmode
&& out_n
== 4
5885 && in_mode
== SFmode
&& in_n
== 4)
5886 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIM
];
5889 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5890 && out_mode
== DFmode
&& out_n
== 2
5891 && in_mode
== DFmode
&& in_n
== 2)
5892 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDDP
];
5893 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5894 && out_mode
== SFmode
&& out_n
== 4
5895 && in_mode
== SFmode
&& in_n
== 4)
5896 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDSP
];
5897 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5898 && out_mode
== SFmode
&& out_n
== 4
5899 && in_mode
== SFmode
&& in_n
== 4)
5900 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VMADDFP
];
5903 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5904 && out_mode
== DFmode
&& out_n
== 2
5905 && in_mode
== DFmode
&& in_n
== 2)
5906 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIZ
];
5907 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5908 && out_mode
== SFmode
&& out_n
== 4
5909 && in_mode
== SFmode
&& in_n
== 4)
5910 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIZ
];
5911 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
5912 && out_mode
== SFmode
&& out_n
== 4
5913 && in_mode
== SFmode
&& in_n
== 4)
5914 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIZ
];
5917 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5918 && flag_unsafe_math_optimizations
5919 && out_mode
== DFmode
&& out_n
== 2
5920 && in_mode
== DFmode
&& in_n
== 2)
5921 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPI
];
5922 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5923 && flag_unsafe_math_optimizations
5924 && out_mode
== SFmode
&& out_n
== 4
5925 && in_mode
== SFmode
&& in_n
== 4)
5926 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPI
];
5929 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5930 && !flag_trapping_math
5931 && out_mode
== DFmode
&& out_n
== 2
5932 && in_mode
== DFmode
&& in_n
== 2)
5933 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIC
];
5934 if (VECTOR_UNIT_VSX_P (V4SFmode
)
5935 && !flag_trapping_math
5936 && out_mode
== SFmode
&& out_n
== 4
5937 && in_mode
== SFmode
&& in_n
== 4)
5938 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIC
];
5944 /* Generate calls to libmass if appropriate. */
5945 if (rs6000_veclib_handler
)
5946 return rs6000_veclib_handler (combined_fn (fn
), type_out
, type_in
);
5951 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5954 rs6000_builtin_md_vectorized_function (tree fndecl
, tree type_out
,
5957 machine_mode in_mode
, out_mode
;
5960 if (TARGET_DEBUG_BUILTIN
)
5961 fprintf (stderr
, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5962 IDENTIFIER_POINTER (DECL_NAME (fndecl
)),
5963 GET_MODE_NAME (TYPE_MODE (type_out
)),
5964 GET_MODE_NAME (TYPE_MODE (type_in
)));
5966 if (TREE_CODE (type_out
) != VECTOR_TYPE
5967 || TREE_CODE (type_in
) != VECTOR_TYPE
)
5970 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
5971 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
5972 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
5973 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
5975 enum rs6000_builtins fn
5976 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
5979 case RS6000_BUILTIN_RSQRTF
:
5980 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
5981 && out_mode
== SFmode
&& out_n
== 4
5982 && in_mode
== SFmode
&& in_n
== 4)
5983 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRSQRTFP
];
5985 case RS6000_BUILTIN_RSQRT
:
5986 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5987 && out_mode
== DFmode
&& out_n
== 2
5988 && in_mode
== DFmode
&& in_n
== 2)
5989 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
5991 case RS6000_BUILTIN_RECIPF
:
5992 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
5993 && out_mode
== SFmode
&& out_n
== 4
5994 && in_mode
== SFmode
&& in_n
== 4)
5995 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRECIPFP
];
5997 case RS6000_BUILTIN_RECIP
:
5998 if (VECTOR_UNIT_VSX_P (V2DFmode
)
5999 && out_mode
== DFmode
&& out_n
== 2
6000 && in_mode
== DFmode
&& in_n
== 2)
6001 return rs6000_builtin_decls
[VSX_BUILTIN_RECIP_V2DF
];
6009 /* Default CPU string for rs6000*_file_start functions. */
6010 static const char *rs6000_default_cpu
;
6012 /* Do anything needed at the start of the asm file. */
6015 rs6000_file_start (void)
6018 const char *start
= buffer
;
6019 FILE *file
= asm_out_file
;
6021 rs6000_default_cpu
= TARGET_CPU_DEFAULT
;
6023 default_file_start ();
6025 if (flag_verbose_asm
)
6027 sprintf (buffer
, "\n%s rs6000/powerpc options:", ASM_COMMENT_START
);
6029 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
6031 fprintf (file
, "%s --with-cpu=%s", start
, rs6000_default_cpu
);
6035 if (global_options_set
.x_rs6000_cpu_index
)
6037 fprintf (file
, "%s -mcpu=%s", start
,
6038 processor_target_table
[rs6000_cpu_index
].name
);
6042 if (global_options_set
.x_rs6000_tune_index
)
6044 fprintf (file
, "%s -mtune=%s", start
,
6045 processor_target_table
[rs6000_tune_index
].name
);
6049 if (PPC405_ERRATUM77
)
6051 fprintf (file
, "%s PPC405CR_ERRATUM77", start
);
6055 #ifdef USING_ELFOS_H
6056 switch (rs6000_sdata
)
6058 case SDATA_NONE
: fprintf (file
, "%s -msdata=none", start
); start
= ""; break;
6059 case SDATA_DATA
: fprintf (file
, "%s -msdata=data", start
); start
= ""; break;
6060 case SDATA_SYSV
: fprintf (file
, "%s -msdata=sysv", start
); start
= ""; break;
6061 case SDATA_EABI
: fprintf (file
, "%s -msdata=eabi", start
); start
= ""; break;
6064 if (rs6000_sdata
&& g_switch_value
)
6066 fprintf (file
, "%s -G %d", start
,
6076 #ifdef USING_ELFOS_H
6077 if (!(rs6000_default_cpu
&& rs6000_default_cpu
[0])
6078 && !global_options_set
.x_rs6000_cpu_index
)
6080 fputs ("\t.machine ", asm_out_file
);
6081 if ((rs6000_isa_flags
& OPTION_MASK_MODULO
) != 0)
6082 fputs ("power9\n", asm_out_file
);
6083 else if ((rs6000_isa_flags
& OPTION_MASK_DIRECT_MOVE
) != 0)
6084 fputs ("power8\n", asm_out_file
);
6085 else if ((rs6000_isa_flags
& OPTION_MASK_POPCNTD
) != 0)
6086 fputs ("power7\n", asm_out_file
);
6087 else if ((rs6000_isa_flags
& OPTION_MASK_CMPB
) != 0)
6088 fputs ("power6\n", asm_out_file
);
6089 else if ((rs6000_isa_flags
& OPTION_MASK_POPCNTB
) != 0)
6090 fputs ("power5\n", asm_out_file
);
6091 else if ((rs6000_isa_flags
& OPTION_MASK_MFCRF
) != 0)
6092 fputs ("power4\n", asm_out_file
);
6093 else if ((rs6000_isa_flags
& OPTION_MASK_POWERPC64
) != 0)
6094 fputs ("ppc64\n", asm_out_file
);
6096 fputs ("ppc\n", asm_out_file
);
6100 if (DEFAULT_ABI
== ABI_ELFv2
)
6101 fprintf (file
, "\t.abiversion 2\n");
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_size == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
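
/* Illustrative sketch (not from this port): the pieces behind the
   worst-case count above.  An arbitrary 64-bit constant is materialized
   with lis/ori to build the high word, a 32-bit left shift, then oris/ori
   for the low word, i.e. five instructions in total.  Plain C; the names
   are local to this example.  */
static void
example_split_64bit_constant (unsigned long long value,
			      unsigned short piece[4])
{
  piece[0] = (value >> 48) & 0xffff;	/* lis  rT, piece[0]        */
  piece[1] = (value >> 32) & 0xffff;	/* ori  rT, rT, piece[1]    */
  /* sldi rT, rT, 32 moves the high word into place (the "+ 1" above).  */
  piece[2] = (value >> 16) & 0xffff;	/* oris rT, rT, piece[2]    */
  piece[3] = value & 0xffff;		/* ori  rT, rT, piece[3]    */
}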
6166 num_insns_constant (rtx op
, machine_mode mode
)
6168 HOST_WIDE_INT low
, high
;
6170 switch (GET_CODE (op
))
6173 if ((INTVAL (op
) >> 31) != 0 && (INTVAL (op
) >> 31) != -1
6174 && rs6000_is_valid_and_mask (op
, mode
))
6177 return num_insns_constant_wide (INTVAL (op
));
6179 case CONST_WIDE_INT
:
6182 int ins
= CONST_WIDE_INT_NUNITS (op
) - 1;
6183 for (i
= 0; i
< CONST_WIDE_INT_NUNITS (op
); i
++)
6184 ins
+= num_insns_constant_wide (CONST_WIDE_INT_ELT (op
, i
));
6189 if (mode
== SFmode
|| mode
== SDmode
)
6193 if (DECIMAL_FLOAT_MODE_P (mode
))
6194 REAL_VALUE_TO_TARGET_DECIMAL32
6195 (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6197 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6198 return num_insns_constant_wide ((HOST_WIDE_INT
) l
);
6202 if (DECIMAL_FLOAT_MODE_P (mode
))
6203 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6205 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
6206 high
= l
[WORDS_BIG_ENDIAN
== 0];
6207 low
= l
[WORDS_BIG_ENDIAN
!= 0];
6210 return (num_insns_constant_wide (low
)
6211 + num_insns_constant_wide (high
));
6214 if ((high
== 0 && low
>= 0)
6215 || (high
== -1 && low
< 0))
6216 return num_insns_constant_wide (low
);
6218 else if (rs6000_is_valid_and_mask (op
, mode
))
6222 return num_insns_constant_wide (high
) + 1;
6225 return (num_insns_constant_wide (high
)
6226 + num_insns_constant_wide (low
) + 1);
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */
6263 vspltis_constant (rtx op
, unsigned step
, unsigned copies
)
6265 machine_mode mode
= GET_MODE (op
);
6266 machine_mode inner
= GET_MODE_INNER (mode
);
6274 HOST_WIDE_INT splat_val
;
6275 HOST_WIDE_INT msb_val
;
6277 if (mode
== V2DImode
|| mode
== V2DFmode
|| mode
== V1TImode
)
6280 nunits
= GET_MODE_NUNITS (mode
);
6281 bitsize
= GET_MODE_BITSIZE (inner
);
6282 mask
= GET_MODE_MASK (inner
);
6284 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
6286 msb_val
= val
>= 0 ? 0 : -1;
6288 /* Construct the value to be splatted, if possible. If not, return 0. */
6289 for (i
= 2; i
<= copies
; i
*= 2)
6291 HOST_WIDE_INT small_val
;
6293 small_val
= splat_val
>> bitsize
;
6295 if (splat_val
!= ((HOST_WIDE_INT
)
6296 ((unsigned HOST_WIDE_INT
) small_val
<< bitsize
)
6297 | (small_val
& mask
)))
6299 splat_val
= small_val
;
6302 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6303 if (EASY_VECTOR_15 (splat_val
))
  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
6309 else if (EASY_VECTOR_15_ADD_SELF (splat_val
)
6310 && (splat_val
>= 0 || (step
== 1 && copies
== 1)))
  /* Also check if we are loading up the most significant bit, which can be
     done by loading up -1 and shifting the value left by -1.  */
6315 else if (EASY_VECTOR_MSB (splat_val
, inner
))
6321 /* Check if VAL is present in every STEP-th element, and the
6322 other elements are filled with its most significant bit. */
6323 for (i
= 1; i
< nunits
; ++i
)
6325 HOST_WIDE_INT desired_val
;
6326 unsigned elt
= BYTES_BIG_ENDIAN
? nunits
- 1 - i
: i
;
6327 if ((i
& (step
- 1)) == 0)
6330 desired_val
= msb_val
;
6332 if (desired_val
!= const_vector_elt_as_int (op
, elt
))
/* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
   instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */
6349 vspltis_shifted (rtx op
)
6351 machine_mode mode
= GET_MODE (op
);
6352 machine_mode inner
= GET_MODE_INNER (mode
);
6360 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
)
6363 /* We need to create pseudo registers to do the shift, so don't recognize
6364 shift vector constants after reload. */
6365 if (!can_create_pseudo_p ())
6368 nunits
= GET_MODE_NUNITS (mode
);
6369 mask
= GET_MODE_MASK (inner
);
6371 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? 0 : nunits
- 1);
6373 /* Check if the value can really be the operand of a vspltis[bhw]. */
6374 if (EASY_VECTOR_15 (val
))
6377 /* Also check if we are loading up the most significant bit which can be done
6378 by loading up -1 and shifting the value left by -1. */
6379 else if (EASY_VECTOR_MSB (val
, inner
))
6385 /* Check if VAL is present in every STEP-th element until we find elements
6386 that are 0 or all 1 bits. */
6387 for (i
= 1; i
< nunits
; ++i
)
6389 unsigned elt
= BYTES_BIG_ENDIAN
? i
: nunits
- 1 - i
;
6390 HOST_WIDE_INT elt_val
= const_vector_elt_as_int (op
, elt
);
6392 /* If the value isn't the splat value, check for the remaining elements
6398 for (j
= i
+1; j
< nunits
; ++j
)
6400 unsigned elt2
= BYTES_BIG_ENDIAN
? j
: nunits
- 1 - j
;
6401 if (const_vector_elt_as_int (op
, elt2
) != 0)
6405 return (nunits
- i
) * GET_MODE_SIZE (inner
);
6408 else if ((elt_val
& mask
) == mask
)
6410 for (j
= i
+1; j
< nunits
; ++j
)
6412 unsigned elt2
= BYTES_BIG_ENDIAN
? j
: nunits
- 1 - j
;
6413 if ((const_vector_elt_as_int (op
, elt2
) & mask
) != mask
)
6417 return -((nunits
- i
) * GET_MODE_SIZE (inner
));
  /* If all elements are equal, we don't need to do VSLDOI.  */
6430 /* Return true if OP is of the given MODE and can be synthesized
6431 with a vspltisb, vspltish or vspltisw. */
6434 easy_altivec_constant (rtx op
, machine_mode mode
)
6436 unsigned step
, copies
;
6438 if (mode
== VOIDmode
)
6439 mode
= GET_MODE (op
);
6440 else if (mode
!= GET_MODE (op
))
6443 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6445 if (mode
== V2DFmode
)
6446 return zero_constant (op
, mode
);
6448 else if (mode
== V2DImode
)
6450 if (GET_CODE (CONST_VECTOR_ELT (op
, 0)) != CONST_INT
6451 || GET_CODE (CONST_VECTOR_ELT (op
, 1)) != CONST_INT
)
6454 if (zero_constant (op
, mode
))
6457 if (INTVAL (CONST_VECTOR_ELT (op
, 0)) == -1
6458 && INTVAL (CONST_VECTOR_ELT (op
, 1)) == -1)
6464 /* V1TImode is a special container for TImode. Ignore for now. */
6465 else if (mode
== V1TImode
)
6468 /* Start with a vspltisw. */
6469 step
= GET_MODE_NUNITS (mode
) / 4;
6472 if (vspltis_constant (op
, step
, copies
))
6475 /* Then try with a vspltish. */
6481 if (vspltis_constant (op
, step
, copies
))
6484 /* And finally a vspltisb. */
6490 if (vspltis_constant (op
, step
, copies
))
6493 if (vspltis_shifted (op
) != 0)
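
/* Illustrative sketch (not from this port): the (STEP, COPIES) pairs the
   routine above walks through, first for vspltisw (word elements), then
   vspltish (halfword), then vspltisb (byte).  Wider vector elements need
   the operand repeated in every STEP-th slot; narrower ones need COPIES
   replicas packed into each slot.  The exact narrowing rule below is this
   example's reading of the elided code, in plain C with names local to
   the example.  */
static void
example_vspltis_step_copies (unsigned nunits, unsigned pairs[3][2])
{
  unsigned step = nunits / 4;	/* Start with a word-sized splat.  */
  unsigned copies = 1;
  int i;

  for (i = 0; i < 3; i++)
    {
      pairs[i][0] = step;
      pairs[i][1] = copies;
      /* Narrow the splat element for the next try.  */
      if (step == 1)
	copies <<= 1;
      else
	step >>= 1;
    }
}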
6499 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6500 result is OP. Abort if it is not possible. */
6503 gen_easy_altivec_constant (rtx op
)
6505 machine_mode mode
= GET_MODE (op
);
6506 int nunits
= GET_MODE_NUNITS (mode
);
6507 rtx val
= CONST_VECTOR_ELT (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
6508 unsigned step
= nunits
/ 4;
6509 unsigned copies
= 1;
6511 /* Start with a vspltisw. */
6512 if (vspltis_constant (op
, step
, copies
))
6513 return gen_rtx_VEC_DUPLICATE (V4SImode
, gen_lowpart (SImode
, val
));
6515 /* Then try with a vspltish. */
6521 if (vspltis_constant (op
, step
, copies
))
6522 return gen_rtx_VEC_DUPLICATE (V8HImode
, gen_lowpart (HImode
, val
));
6524 /* And finally a vspltisb. */
6530 if (vspltis_constant (op
, step
, copies
))
6531 return gen_rtx_VEC_DUPLICATE (V16QImode
, gen_lowpart (QImode
, val
));
/* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
   instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).

   Return the number of instructions needed (1 or 2) into the address pointed
   to by NUM_INSNS_PTR.

   Return the constant that is being split via CONSTANT_PTR.  */
6545 xxspltib_constant_p (rtx op
,
6550 size_t nunits
= GET_MODE_NUNITS (mode
);
6552 HOST_WIDE_INT value
;
6555 /* Set the returned values to out of bound values. */
6556 *num_insns_ptr
= -1;
6557 *constant_ptr
= 256;
6559 if (!TARGET_P9_VECTOR
)
6562 if (mode
== VOIDmode
)
6563 mode
= GET_MODE (op
);
6565 else if (mode
!= GET_MODE (op
) && GET_MODE (op
) != VOIDmode
)
6568 /* Handle (vec_duplicate <constant>). */
6569 if (GET_CODE (op
) == VEC_DUPLICATE
)
6571 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
6572 && mode
!= V2DImode
)
6575 element
= XEXP (op
, 0);
6576 if (!CONST_INT_P (element
))
6579 value
= INTVAL (element
);
6580 if (!IN_RANGE (value
, -128, 127))
6584 /* Handle (const_vector [...]). */
6585 else if (GET_CODE (op
) == CONST_VECTOR
)
6587 if (mode
!= V16QImode
&& mode
!= V8HImode
&& mode
!= V4SImode
6588 && mode
!= V2DImode
)
6591 element
= CONST_VECTOR_ELT (op
, 0);
6592 if (!CONST_INT_P (element
))
6595 value
= INTVAL (element
);
6596 if (!IN_RANGE (value
, -128, 127))
6599 for (i
= 1; i
< nunits
; i
++)
6601 element
= CONST_VECTOR_ELT (op
, i
);
6602 if (!CONST_INT_P (element
))
6605 if (value
!= INTVAL (element
))
  /* Handle integer constants being loaded into the upper part of the VSX
     register as a scalar.  If the value isn't 0/-1, only allow it if the mode
     can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6613 else if (CONST_INT_P (op
))
6615 if (!SCALAR_INT_MODE_P (mode
))
6618 value
= INTVAL (op
);
6619 if (!IN_RANGE (value
, -128, 127))
6622 if (!IN_RANGE (value
, -1, 0))
6624 if (!(reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
] & RELOAD_REG_VALID
))
6627 if (EASY_VECTOR_15 (value
))
6635 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6636 sign extend. Special case 0/-1 to allow getting any VSX register instead
6637 of an Altivec register. */
6638 if ((mode
== V4SImode
|| mode
== V8HImode
) && !IN_RANGE (value
, -1, 0)
6639 && EASY_VECTOR_15 (value
))
6642 /* Return # of instructions and the constant byte for XXSPLTIB. */
6643 if (mode
== V16QImode
)
6646 else if (IN_RANGE (value
, -1, 0))
6652 *constant_ptr
= (int) value
;
6657 output_vec_const_move (rtx
*operands
)
6665 mode
= GET_MODE (dest
);
6669 bool dest_vmx_p
= ALTIVEC_REGNO_P (REGNO (dest
));
6670 int xxspltib_value
= 256;
6673 if (zero_constant (vec
, mode
))
6675 if (TARGET_P9_VECTOR
)
6676 return "xxspltib %x0,0";
6678 else if (dest_vmx_p
)
6679 return "vspltisw %0,0";
6682 return "xxlxor %x0,%x0,%x0";
6685 if (all_ones_constant (vec
, mode
))
6687 if (TARGET_P9_VECTOR
)
6688 return "xxspltib %x0,255";
6690 else if (dest_vmx_p
)
6691 return "vspltisw %0,-1";
6693 else if (TARGET_P8_VECTOR
)
6694 return "xxlorc %x0,%x0,%x0";
6700 if (TARGET_P9_VECTOR
6701 && xxspltib_constant_p (vec
, mode
, &num_insns
, &xxspltib_value
))
6705 operands
[2] = GEN_INT (xxspltib_value
& 0xff);
6706 return "xxspltib %x0,%2";
6717 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest
)));
6718 if (zero_constant (vec
, mode
))
6719 return "vspltisw %0,0";
6721 if (all_ones_constant (vec
, mode
))
6722 return "vspltisw %0,-1";
6724 /* Do we need to construct a value using VSLDOI? */
6725 shift
= vspltis_shifted (vec
);
6729 splat_vec
= gen_easy_altivec_constant (vec
);
6730 gcc_assert (GET_CODE (splat_vec
) == VEC_DUPLICATE
);
6731 operands
[1] = XEXP (splat_vec
, 0);
6732 if (!EASY_VECTOR_15 (INTVAL (operands
[1])))
6735 switch (GET_MODE (splat_vec
))
6738 return "vspltisw %0,%1";
6741 return "vspltish %0,%1";
6744 return "vspltisb %0,%1";
6754 /* Initialize TARGET of vector PAIRED to VALS. */
6757 paired_expand_vector_init (rtx target
, rtx vals
)
6759 machine_mode mode
= GET_MODE (target
);
6760 int n_elts
= GET_MODE_NUNITS (mode
);
6762 rtx x
, new_rtx
, tmp
, constant_op
, op1
, op2
;
6765 for (i
= 0; i
< n_elts
; ++i
)
6767 x
= XVECEXP (vals
, 0, i
);
6768 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6773 /* Load from constant pool. */
6774 emit_move_insn (target
, gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0)));
6780 /* The vector is initialized only with non-constants. */
6781 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, XVECEXP (vals
, 0, 0),
6782 XVECEXP (vals
, 0, 1));
6784 emit_move_insn (target
, new_rtx
);
6788 /* One field is non-constant and the other one is a constant. Load the
6789 constant from the constant pool and use ps_merge instruction to
6790 construct the whole vector. */
6791 op1
= XVECEXP (vals
, 0, 0);
6792 op2
= XVECEXP (vals
, 0, 1);
6794 constant_op
= (CONSTANT_P (op1
)) ? op1
: op2
;
6796 tmp
= gen_reg_rtx (GET_MODE (constant_op
));
6797 emit_move_insn (tmp
, constant_op
);
6799 if (CONSTANT_P (op1
))
6800 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, tmp
, op2
);
6802 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, op1
, tmp
);
6804 emit_move_insn (target
, new_rtx
);
6808 paired_expand_vector_move (rtx operands
[])
6810 rtx op0
= operands
[0], op1
= operands
[1];
6812 emit_move_insn (op0
, op1
);
6815 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6816 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6817 operands for the relation operation COND. This is a recursive
6821 paired_emit_vector_compare (enum rtx_code rcode
,
6822 rtx dest
, rtx op0
, rtx op1
,
6823 rtx cc_op0
, rtx cc_op1
)
6825 rtx tmp
= gen_reg_rtx (V2SFmode
);
6828 gcc_assert (TARGET_PAIRED_FLOAT
);
6829 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
6835 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6839 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
6840 emit_insn (gen_selv2sf4 (dest
, tmp
, op0
, op1
, CONST0_RTX (SFmode
)));
6844 paired_emit_vector_compare (GE
, dest
, op0
, op1
, cc_op1
, cc_op0
);
6847 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6850 tmp1
= gen_reg_rtx (V2SFmode
);
6851 max
= gen_reg_rtx (V2SFmode
);
6852 min
= gen_reg_rtx (V2SFmode
);
6853 gen_reg_rtx (V2SFmode
);
6855 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
6856 emit_insn (gen_selv2sf4
6857 (max
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
6858 emit_insn (gen_subv2sf3 (tmp
, cc_op1
, cc_op0
));
6859 emit_insn (gen_selv2sf4
6860 (min
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
6861 emit_insn (gen_subv2sf3 (tmp1
, min
, max
));
6862 emit_insn (gen_selv2sf4 (dest
, tmp1
, op0
, op1
, CONST0_RTX (SFmode
)));
6865 paired_emit_vector_compare (EQ
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6868 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6871 paired_emit_vector_compare (LT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6874 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6877 paired_emit_vector_compare (GT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
6886 /* Emit vector conditional expression.
6887 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6888 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6891 paired_emit_vector_cond_expr (rtx dest
, rtx op1
, rtx op2
,
6892 rtx cond
, rtx cc_op0
, rtx cc_op1
)
6894 enum rtx_code rcode
= GET_CODE (cond
);
6896 if (!TARGET_PAIRED_FLOAT
)
6899 paired_emit_vector_compare (rcode
, dest
, op1
, op2
, cc_op0
, cc_op1
);
6904 /* Initialize vector TARGET to VALS. */
6907 rs6000_expand_vector_init (rtx target
, rtx vals
)
6909 machine_mode mode
= GET_MODE (target
);
6910 machine_mode inner_mode
= GET_MODE_INNER (mode
);
6911 int n_elts
= GET_MODE_NUNITS (mode
);
6912 int n_var
= 0, one_var
= -1;
6913 bool all_same
= true, all_const_zero
= true;
6917 for (i
= 0; i
< n_elts
; ++i
)
6919 x
= XVECEXP (vals
, 0, i
);
6920 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
6921 ++n_var
, one_var
= i
;
6922 else if (x
!= CONST0_RTX (inner_mode
))
6923 all_const_zero
= false;
6925 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
6931 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
6932 bool int_vector_p
= (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
6933 if ((int_vector_p
|| TARGET_VSX
) && all_const_zero
)
6935 /* Zero register. */
6936 emit_move_insn (target
, CONST0_RTX (mode
));
6939 else if (int_vector_p
&& easy_vector_constant (const_vec
, mode
))
6941 /* Splat immediate. */
6942 emit_insn (gen_rtx_SET (target
, const_vec
));
6947 /* Load from constant pool. */
6948 emit_move_insn (target
, const_vec
);
6953 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6954 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
6958 size_t num_elements
= all_same
? 1 : 2;
6959 for (i
= 0; i
< num_elements
; i
++)
6961 op
[i
] = XVECEXP (vals
, 0, i
);
6962 /* Just in case there is a SUBREG with a smaller mode, do a
6964 if (GET_MODE (op
[i
]) != inner_mode
)
6966 rtx tmp
= gen_reg_rtx (inner_mode
);
6967 convert_move (tmp
, op
[i
], 0);
6970 /* Allow load with splat double word. */
6971 else if (MEM_P (op
[i
]))
6974 op
[i
] = force_reg (inner_mode
, op
[i
]);
6976 else if (!REG_P (op
[i
]))
6977 op
[i
] = force_reg (inner_mode
, op
[i
]);
6982 if (mode
== V2DFmode
)
6983 emit_insn (gen_vsx_splat_v2df (target
, op
[0]));
6985 emit_insn (gen_vsx_splat_v2di (target
, op
[0]));
6989 if (mode
== V2DFmode
)
6990 emit_insn (gen_vsx_concat_v2df (target
, op
[0], op
[1]));
6992 emit_insn (gen_vsx_concat_v2di (target
, op
[0], op
[1]));
6997 /* Special case initializing vector int if we are on 64-bit systems with
6998 direct move or we have the ISA 3.0 instructions. */
6999 if (mode
== V4SImode
&& VECTOR_MEM_VSX_P (V4SImode
)
7000 && TARGET_DIRECT_MOVE_64BIT
)
7004 rtx element0
= XVECEXP (vals
, 0, 0);
7005 if (MEM_P (element0
))
7006 element0
= rs6000_address_for_fpconvert (element0
);
7008 element0
= force_reg (SImode
, element0
);
7010 if (TARGET_P9_VECTOR
)
7011 emit_insn (gen_vsx_splat_v4si (target
, element0
));
7014 rtx tmp
= gen_reg_rtx (DImode
);
7015 emit_insn (gen_zero_extendsidi2 (tmp
, element0
));
7016 emit_insn (gen_vsx_splat_v4si_di (target
, tmp
));
7025 for (i
= 0; i
< 4; i
++)
7027 elements
[i
] = XVECEXP (vals
, 0, i
);
7028 if (!CONST_INT_P (elements
[i
]) && !REG_P (elements
[i
]))
7029 elements
[i
] = copy_to_mode_reg (SImode
, elements
[i
]);
7032 emit_insn (gen_vsx_init_v4si (target
, elements
[0], elements
[1],
7033 elements
[2], elements
[3]));
7038 /* With single precision floating point on VSX, know that internally single
7039 precision is actually represented as a double, and either make 2 V2DF
7040 vectors, and convert these vectors to single precision, or do one
7041 conversion, and splat the result to the other elements. */
7042 if (mode
== V4SFmode
&& VECTOR_MEM_VSX_P (V4SFmode
))
7046 rtx element0
= XVECEXP (vals
, 0, 0);
7048 if (TARGET_P9_VECTOR
)
7050 if (MEM_P (element0
))
7051 element0
= rs6000_address_for_fpconvert (element0
);
7053 emit_insn (gen_vsx_splat_v4sf (target
, element0
));
7058 rtx freg
= gen_reg_rtx (V4SFmode
);
7059 rtx sreg
= force_reg (SFmode
, element0
);
7060 rtx cvt
= (TARGET_XSCVDPSPN
7061 ? gen_vsx_xscvdpspn_scalar (freg
, sreg
)
7062 : gen_vsx_xscvdpsp_scalar (freg
, sreg
));
7065 emit_insn (gen_vsx_xxspltw_v4sf_direct (target
, freg
,
7071 rtx dbl_even
= gen_reg_rtx (V2DFmode
);
7072 rtx dbl_odd
= gen_reg_rtx (V2DFmode
);
7073 rtx flt_even
= gen_reg_rtx (V4SFmode
);
7074 rtx flt_odd
= gen_reg_rtx (V4SFmode
);
7075 rtx op0
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
7076 rtx op1
= force_reg (SFmode
, XVECEXP (vals
, 0, 1));
7077 rtx op2
= force_reg (SFmode
, XVECEXP (vals
, 0, 2));
7078 rtx op3
= force_reg (SFmode
, XVECEXP (vals
, 0, 3));
7080 /* Use VMRGEW if we can instead of doing a permute. */
7081 if (TARGET_P8_VECTOR
)
7083 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op2
));
7084 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op1
, op3
));
7085 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
7086 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
7087 if (BYTES_BIG_ENDIAN
)
7088 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_even
, flt_odd
));
7090 emit_insn (gen_p8_vmrgew_v4sf_direct (target
, flt_odd
, flt_even
));
7094 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op1
));
7095 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op2
, op3
));
7096 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
7097 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
7098 rs6000_expand_extract_even (target
, flt_even
, flt_odd
);
7104 /* Special case initializing vector short/char that are splats if we are on
7105 64-bit systems with direct move. */
7106 if (all_same
&& TARGET_DIRECT_MOVE_64BIT
7107 && (mode
== V16QImode
|| mode
== V8HImode
))
7109 rtx op0
= XVECEXP (vals
, 0, 0);
7110 rtx di_tmp
= gen_reg_rtx (DImode
);
7113 op0
= force_reg (GET_MODE_INNER (mode
), op0
);
7115 if (mode
== V16QImode
)
7117 emit_insn (gen_zero_extendqidi2 (di_tmp
, op0
));
7118 emit_insn (gen_vsx_vspltb_di (target
, di_tmp
));
7122 if (mode
== V8HImode
)
7124 emit_insn (gen_zero_extendhidi2 (di_tmp
, op0
));
7125 emit_insn (gen_vsx_vsplth_di (target
, di_tmp
));
7130 /* Store value to stack temp. Load vector element. Splat. However, splat
7131 of 64-bit items is not supported on Altivec. */
7132 if (all_same
&& GET_MODE_SIZE (inner_mode
) <= 4)
7134 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
7135 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
7136 XVECEXP (vals
, 0, 0));
7137 x
= gen_rtx_UNSPEC (VOIDmode
,
7138 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
7139 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7141 gen_rtx_SET (target
, mem
),
7143 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
7144 gen_rtx_PARALLEL (VOIDmode
,
7145 gen_rtvec (1, const0_rtx
)));
7146 emit_insn (gen_rtx_SET (target
, gen_rtx_VEC_DUPLICATE (mode
, x
)));
7150 /* One field is non-constant. Load constant then overwrite
7154 rtx copy
= copy_rtx (vals
);
7156 /* Load constant part of vector, substitute neighboring value for
7158 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
7159 rs6000_expand_vector_init (target
, copy
);
7161 /* Insert variable. */
7162 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
7166 /* Construct the vector in memory one field at a time
7167 and load the whole vector. */
7168 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
7169 for (i
= 0; i
< n_elts
; i
++)
7170 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
7171 i
* GET_MODE_SIZE (inner_mode
)),
7172 XVECEXP (vals
, 0, i
));
7173 emit_move_insn (target
, mem
);
7176 /* Set field ELT of TARGET to VAL. */
7179 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
7181 machine_mode mode
= GET_MODE (target
);
7182 machine_mode inner_mode
= GET_MODE_INNER (mode
);
7183 rtx reg
= gen_reg_rtx (mode
);
7185 int width
= GET_MODE_SIZE (inner_mode
);
7188 val
= force_reg (GET_MODE (val
), val
);
7190 if (VECTOR_MEM_VSX_P (mode
))
7192 rtx insn
= NULL_RTX
;
7193 rtx elt_rtx
= GEN_INT (elt
);
7195 if (mode
== V2DFmode
)
7196 insn
= gen_vsx_set_v2df (target
, target
, val
, elt_rtx
);
7198 else if (mode
== V2DImode
)
7199 insn
= gen_vsx_set_v2di (target
, target
, val
, elt_rtx
);
7201 else if (TARGET_P9_VECTOR
&& TARGET_POWERPC64
)
7203 if (mode
== V4SImode
)
7204 insn
= gen_vsx_set_v4si_p9 (target
, target
, val
, elt_rtx
);
7205 else if (mode
== V8HImode
)
7206 insn
= gen_vsx_set_v8hi_p9 (target
, target
, val
, elt_rtx
);
7207 else if (mode
== V16QImode
)
7208 insn
= gen_vsx_set_v16qi_p9 (target
, target
, val
, elt_rtx
);
7209 else if (mode
== V4SFmode
)
7210 insn
= gen_vsx_set_v4sf_p9 (target
, target
, val
, elt_rtx
);
7220 /* Simplify setting single element vectors like V1TImode. */
7221 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (inner_mode
) && elt
== 0)
7223 emit_move_insn (target
, gen_lowpart (mode
, val
));
7227 /* Load single variable value. */
7228 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
7229 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
7230 x
= gen_rtx_UNSPEC (VOIDmode
,
7231 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
7232 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
7234 gen_rtx_SET (reg
, mem
),
7237 /* Linear sequence. */
7238 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
7239 for (i
= 0; i
< 16; ++i
)
7240 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
7242 /* Set permute mask to insert element into target. */
7243 for (i
= 0; i
< width
; ++i
)
7244 XVECEXP (mask
, 0, elt
*width
+ i
)
7245 = GEN_INT (i
+ 0x10);
7246 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
7248 if (BYTES_BIG_ENDIAN
)
7249 x
= gen_rtx_UNSPEC (mode
,
7250 gen_rtvec (3, target
, reg
,
7251 force_reg (V16QImode
, x
)),
7255 if (TARGET_P9_VECTOR
)
7256 x
= gen_rtx_UNSPEC (mode
,
7257 gen_rtvec (3, target
, reg
,
7258 force_reg (V16QImode
, x
)),
7262 /* Invert selector. We prefer to generate VNAND on P8 so
7263 that future fusion opportunities can kick in, but must
7264 generate VNOR elsewhere. */
7265 rtx notx
= gen_rtx_NOT (V16QImode
, force_reg (V16QImode
, x
));
7266 rtx iorx
= (TARGET_P8_VECTOR
7267 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
7268 : gen_rtx_AND (V16QImode
, notx
, notx
));
7269 rtx tmp
= gen_reg_rtx (V16QImode
);
7270 emit_insn (gen_rtx_SET (tmp
, iorx
));
7272 /* Permute with operands reversed and adjusted selector. */
7273 x
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, reg
, target
, tmp
),
7278 emit_insn (gen_rtx_SET (target
, x
));
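
/* Illustrative sketch (not from this port): the vperm selector built above
   to insert a single element.  The 16 selector bytes start as the identity
   0..15 (keep the bytes of the original vector), and the WIDTH bytes that
   cover element ELT are redirected to 0x10..0x1f (take them from the value
   being inserted).  Plain C; the name is local to this example.  */
static void
example_build_insert_selector (unsigned char mask[16], unsigned elt,
			       unsigned width)
{
  unsigned i;

  /* Linear sequence: select byte i of the target by default.  */
  for (i = 0; i < 16; i++)
    mask[i] = i;

  /* Redirect the bytes of element ELT to the second vperm operand.  */
  for (i = 0; i < width; i++)
    mask[elt * width + i] = i + 0x10;
}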
7281 /* Extract field ELT from VEC into TARGET. */
7284 rs6000_expand_vector_extract (rtx target
, rtx vec
, rtx elt
)
7286 machine_mode mode
= GET_MODE (vec
);
7287 machine_mode inner_mode
= GET_MODE_INNER (mode
);
7290 if (VECTOR_MEM_VSX_P (mode
) && CONST_INT_P (elt
))
7297 gcc_assert (INTVAL (elt
) == 0 && inner_mode
== TImode
);
7298 emit_move_insn (target
, gen_lowpart (TImode
, vec
));
7301 emit_insn (gen_vsx_extract_v2df (target
, vec
, elt
));
7304 emit_insn (gen_vsx_extract_v2di (target
, vec
, elt
));
7307 emit_insn (gen_vsx_extract_v4sf (target
, vec
, elt
));
7310 if (TARGET_DIRECT_MOVE_64BIT
)
7312 emit_insn (gen_vsx_extract_v16qi (target
, vec
, elt
));
7318 if (TARGET_DIRECT_MOVE_64BIT
)
7320 emit_insn (gen_vsx_extract_v8hi (target
, vec
, elt
));
7326 if (TARGET_DIRECT_MOVE_64BIT
)
7328 emit_insn (gen_vsx_extract_v4si (target
, vec
, elt
));
7334 else if (VECTOR_MEM_VSX_P (mode
) && !CONST_INT_P (elt
)
7335 && TARGET_DIRECT_MOVE_64BIT
)
7337 if (GET_MODE (elt
) != DImode
)
7339 rtx tmp
= gen_reg_rtx (DImode
);
7340 convert_move (tmp
, elt
, 0);
7343 else if (!REG_P (elt
))
7344 elt
= force_reg (DImode
, elt
);
7349 emit_insn (gen_vsx_extract_v2df_var (target
, vec
, elt
));
7353 emit_insn (gen_vsx_extract_v2di_var (target
, vec
, elt
));
7357 emit_insn (gen_vsx_extract_v4sf_var (target
, vec
, elt
));
7361 emit_insn (gen_vsx_extract_v4si_var (target
, vec
, elt
));
7365 emit_insn (gen_vsx_extract_v8hi_var (target
, vec
, elt
));
7369 emit_insn (gen_vsx_extract_v16qi_var (target
, vec
, elt
));
7377 gcc_assert (CONST_INT_P (elt
));
7379 /* Allocate mode-sized buffer. */
7380 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
7382 emit_move_insn (mem
, vec
);
7384 /* Add offset to field within buffer matching vector element. */
7385 mem
= adjust_address_nv (mem
, inner_mode
,
7386 INTVAL (elt
) * GET_MODE_SIZE (inner_mode
));
7388 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));
/* Helper function to return the register number of a RTX.  */

static inline int
regno_or_subregno (rtx op)
{
  if (REG_P (op))
    return REGNO (op);
  else if (SUBREG_P (op))
    return subreg_regno (op);
  else
    gcc_unreachable ();
}
7403 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7404 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7405 temporary (BASE_TMP) to fixup the address. Return the new memory address
7406 that is valid for reads or writes to a given register (SCALAR_REG). */
7409 rs6000_adjust_vec_address (rtx scalar_reg
,
7413 machine_mode scalar_mode
)
7415 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7416 rtx addr
= XEXP (mem
, 0);
7421 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7422 gcc_assert (GET_RTX_CLASS (GET_CODE (addr
)) != RTX_AUTOINC
);
7424 /* Calculate what we need to add to the address to get the element
7426 if (CONST_INT_P (element
))
7427 element_offset
= GEN_INT (INTVAL (element
) * scalar_size
);
7430 int byte_shift
= exact_log2 (scalar_size
);
7431 gcc_assert (byte_shift
>= 0);
7433 if (byte_shift
== 0)
7434 element_offset
= element
;
7438 if (TARGET_POWERPC64
)
7439 emit_insn (gen_ashldi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
7441 emit_insn (gen_ashlsi3 (base_tmp
, element
, GEN_INT (byte_shift
)));
7443 element_offset
= base_tmp
;
7447 /* Create the new address pointing to the element within the vector. If we
7448 are adding 0, we don't have to change the address. */
7449 if (element_offset
== const0_rtx
)
7452 /* A simple indirect address can be converted into a reg + offset
7454 else if (REG_P (addr
) || SUBREG_P (addr
))
7455 new_addr
= gen_rtx_PLUS (Pmode
, addr
, element_offset
);
7457 /* Optimize D-FORM addresses with constant offset with a constant element, to
7458 include the element offset in the address directly. */
7459 else if (GET_CODE (addr
) == PLUS
)
7461 rtx op0
= XEXP (addr
, 0);
7462 rtx op1
= XEXP (addr
, 1);
7465 gcc_assert (REG_P (op0
) || SUBREG_P (op0
));
7466 if (CONST_INT_P (op1
) && CONST_INT_P (element_offset
))
7468 HOST_WIDE_INT offset
= INTVAL (op1
) + INTVAL (element_offset
);
7469 rtx offset_rtx
= GEN_INT (offset
);
7471 if (IN_RANGE (offset
, -32768, 32767)
7472 && (scalar_size
< 8 || (offset
& 0x3) == 0))
7473 new_addr
= gen_rtx_PLUS (Pmode
, op0
, offset_rtx
);
7476 emit_move_insn (base_tmp
, offset_rtx
);
7477 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7482 bool op1_reg_p
= (REG_P (op1
) || SUBREG_P (op1
));
7483 bool ele_reg_p
= (REG_P (element_offset
) || SUBREG_P (element_offset
));
7485 /* Note, ADDI requires the register being added to be a base
7486 register. If the register was R0, load it up into the temporary
7489 && (ele_reg_p
|| reg_or_subregno (op1
) != FIRST_GPR_REGNO
))
7491 insn
= gen_add3_insn (base_tmp
, op1
, element_offset
);
7492 gcc_assert (insn
!= NULL_RTX
);
7497 && reg_or_subregno (element_offset
) != FIRST_GPR_REGNO
)
7499 insn
= gen_add3_insn (base_tmp
, element_offset
, op1
);
7500 gcc_assert (insn
!= NULL_RTX
);
7506 emit_move_insn (base_tmp
, op1
);
7507 emit_insn (gen_add2_insn (base_tmp
, element_offset
));
7510 new_addr
= gen_rtx_PLUS (Pmode
, op0
, base_tmp
);
7516 emit_move_insn (base_tmp
, addr
);
7517 new_addr
= gen_rtx_PLUS (Pmode
, base_tmp
, element_offset
);
7520 /* If we have a PLUS, we need to see whether the particular register class
7521 allows for D-FORM or X-FORM addressing. */
7522 if (GET_CODE (new_addr
) == PLUS
)
7524 rtx op1
= XEXP (new_addr
, 1);
7525 addr_mask_type addr_mask
;
7526 int scalar_regno
= regno_or_subregno (scalar_reg
);
7528 gcc_assert (scalar_regno
< FIRST_PSEUDO_REGISTER
);
7529 if (INT_REGNO_P (scalar_regno
))
7530 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_GPR
];
7532 else if (FP_REGNO_P (scalar_regno
))
7533 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_FPR
];
7535 else if (ALTIVEC_REGNO_P (scalar_regno
))
7536 addr_mask
= reg_addr
[scalar_mode
].addr_mask
[RELOAD_REG_VMX
];
7541 if (REG_P (op1
) || SUBREG_P (op1
))
7542 valid_addr_p
= (addr_mask
& RELOAD_REG_INDEXED
) != 0;
7544 valid_addr_p
= (addr_mask
& RELOAD_REG_OFFSET
) != 0;
7547 else if (REG_P (new_addr
) || SUBREG_P (new_addr
))
7548 valid_addr_p
= true;
7551 valid_addr_p
= false;
7555 emit_move_insn (base_tmp
, new_addr
);
7556 new_addr
= base_tmp
;
7559 return change_address (mem
, scalar_mode
, new_addr
);
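
/* Illustrative sketch (not from this port): the element-to-byte-offset
   computation used above.  Because the scalar size is always a power of
   two here, the multiply is done as a left shift by log2(size).  Plain C;
   the name is local to this example and __builtin_ctz stands in for
   exact_log2.  */
static long long
example_vec_element_byte_offset (long long element, unsigned scalar_size)
{
  int byte_shift = __builtin_ctz (scalar_size);	/* 1,2,4,8 -> 0,1,2,3 */
  return element << byte_shift;
}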
7562 /* Split a variable vec_extract operation into the component instructions. */
7565 rs6000_split_vec_extract_var (rtx dest
, rtx src
, rtx element
, rtx tmp_gpr
,
7568 machine_mode mode
= GET_MODE (src
);
7569 machine_mode scalar_mode
= GET_MODE (dest
);
7570 unsigned scalar_size
= GET_MODE_SIZE (scalar_mode
);
7571 int byte_shift
= exact_log2 (scalar_size
);
7573 gcc_assert (byte_shift
>= 0);
7575 /* If we are given a memory address, optimize to load just the element. We
7576 don't have to adjust the vector element number on little endian
7580 gcc_assert (REG_P (tmp_gpr
));
7581 emit_move_insn (dest
, rs6000_adjust_vec_address (dest
, src
, element
,
7582 tmp_gpr
, scalar_mode
));
7586 else if (REG_P (src
) || SUBREG_P (src
))
7588 int bit_shift
= byte_shift
+ 3;
7590 int dest_regno
= regno_or_subregno (dest
);
7591 int src_regno
= regno_or_subregno (src
);
7592 int element_regno
= regno_or_subregno (element
);
7594 gcc_assert (REG_P (tmp_gpr
));
7596 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7597 a general purpose register. */
7598 if (TARGET_P9_VECTOR
7599 && (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
7600 && INT_REGNO_P (dest_regno
)
7601 && ALTIVEC_REGNO_P (src_regno
)
7602 && INT_REGNO_P (element_regno
))
7604 rtx dest_si
= gen_rtx_REG (SImode
, dest_regno
);
7605 rtx element_si
= gen_rtx_REG (SImode
, element_regno
);
7607 if (mode
== V16QImode
)
7608 emit_insn (VECTOR_ELT_ORDER_BIG
7609 ? gen_vextublx (dest_si
, element_si
, src
)
7610 : gen_vextubrx (dest_si
, element_si
, src
));
7612 else if (mode
== V8HImode
)
7614 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7615 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const1_rtx
));
7616 emit_insn (VECTOR_ELT_ORDER_BIG
7617 ? gen_vextuhlx (dest_si
, tmp_gpr_si
, src
)
7618 : gen_vextuhrx (dest_si
, tmp_gpr_si
, src
));
7624 rtx tmp_gpr_si
= gen_rtx_REG (SImode
, REGNO (tmp_gpr
));
7625 emit_insn (gen_ashlsi3 (tmp_gpr_si
, element_si
, const2_rtx
));
7626 emit_insn (VECTOR_ELT_ORDER_BIG
7627 ? gen_vextuwlx (dest_si
, tmp_gpr_si
, src
)
7628 : gen_vextuwrx (dest_si
, tmp_gpr_si
, src
));
7635 gcc_assert (REG_P (tmp_altivec
));
7637 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7638 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7639 will shift the element into the upper position (adding 3 to convert a
7640 byte shift into a bit shift). */
7641 if (scalar_size
== 8)
7643 if (!VECTOR_ELT_ORDER_BIG
)
7645 emit_insn (gen_xordi3 (tmp_gpr
, element
, const1_rtx
));
7651 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7653 emit_insn (gen_rtx_SET (tmp_gpr
,
7654 gen_rtx_AND (DImode
,
7655 gen_rtx_ASHIFT (DImode
,
7662 if (!VECTOR_ELT_ORDER_BIG
)
7664 rtx num_ele_m1
= GEN_INT (GET_MODE_NUNITS (mode
) - 1);
7666 emit_insn (gen_anddi3 (tmp_gpr
, element
, num_ele_m1
));
7667 emit_insn (gen_subdi3 (tmp_gpr
, num_ele_m1
, tmp_gpr
));
7673 emit_insn (gen_ashldi3 (tmp_gpr
, element2
, GEN_INT (bit_shift
)));
7676 /* Get the value into the lower byte of the Altivec register where VSLO
7678 if (TARGET_P9_VECTOR
)
7679 emit_insn (gen_vsx_splat_v2di (tmp_altivec
, tmp_gpr
));
7680 else if (can_create_pseudo_p ())
7681 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_gpr
, tmp_gpr
));
7684 rtx tmp_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7685 emit_move_insn (tmp_di
, tmp_gpr
);
7686 emit_insn (gen_vsx_concat_v2di (tmp_altivec
, tmp_di
, tmp_di
));
7689 /* Do the VSLO to get the value into the final location. */
7693 emit_insn (gen_vsx_vslo_v2df (dest
, src
, tmp_altivec
));
7697 emit_insn (gen_vsx_vslo_v2di (dest
, src
, tmp_altivec
));
7702 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7703 rtx tmp_altivec_v4sf
= gen_rtx_REG (V4SFmode
, REGNO (tmp_altivec
));
7704 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7705 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7708 emit_insn (gen_vsx_xscvspdp_scalar2 (dest
, tmp_altivec_v4sf
));
7716 rtx tmp_altivec_di
= gen_rtx_REG (DImode
, REGNO (tmp_altivec
));
7717 rtx src_v2di
= gen_rtx_REG (V2DImode
, REGNO (src
));
7718 rtx tmp_gpr_di
= gen_rtx_REG (DImode
, REGNO (dest
));
7719 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di
, src_v2di
,
7721 emit_move_insn (tmp_gpr_di
, tmp_altivec_di
);
7722 emit_insn (gen_ashrdi3 (tmp_gpr_di
, tmp_gpr_di
,
7723 GEN_INT (64 - (8 * scalar_size
))));
7737 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7738 two SImode values. */
7741 rs6000_split_v4si_init_di_reg (rtx dest
, rtx si1
, rtx si2
, rtx tmp
)
7743 const unsigned HOST_WIDE_INT mask_32bit
= HOST_WIDE_INT_C (0xffffffff);
7745 if (CONST_INT_P (si1
) && CONST_INT_P (si2
))
7747 unsigned HOST_WIDE_INT const1
= (UINTVAL (si1
) & mask_32bit
) << 32;
7748 unsigned HOST_WIDE_INT const2
= UINTVAL (si2
) & mask_32bit
;
7750 emit_move_insn (dest
, GEN_INT (const1
| const2
));
7754 /* Put si1 into upper 32-bits of dest. */
7755 if (CONST_INT_P (si1
))
7756 emit_move_insn (dest
, GEN_INT ((UINTVAL (si1
) & mask_32bit
) << 32));
7759 /* Generate RLDIC. */
7760 rtx si1_di
= gen_rtx_REG (DImode
, regno_or_subregno (si1
));
7761 rtx shift_rtx
= gen_rtx_ASHIFT (DImode
, si1_di
, GEN_INT (32));
7762 rtx mask_rtx
= GEN_INT (mask_32bit
<< 32);
7763 rtx and_rtx
= gen_rtx_AND (DImode
, shift_rtx
, mask_rtx
);
7764 gcc_assert (!reg_overlap_mentioned_p (dest
, si1
));
7765 emit_insn (gen_rtx_SET (dest
, and_rtx
));
7768 /* Put si2 into the temporary. */
7769 gcc_assert (!reg_overlap_mentioned_p (dest
, tmp
));
7770 if (CONST_INT_P (si2
))
7771 emit_move_insn (tmp
, GEN_INT (UINTVAL (si2
) & mask_32bit
));
7773 emit_insn (gen_zero_extendsidi2 (tmp
, si2
));
7775 /* Combine the two parts. */
7776 emit_insn (gen_iordi3 (dest
, dest
, tmp
));
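
/* Illustrative sketch (not from this port): the value the helper above
   assembles, as plain C.  SI1 ends up in the upper 32 bits and SI2 in the
   lower 32 bits, matching the shift-and-mask followed by the OR emitted
   above.  The name is local to this example.  */
static unsigned long long
example_combine_si_pair (unsigned int si1, unsigned int si2)
{
  return ((unsigned long long) si1 << 32) | (unsigned long long) si2;
}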
7780 /* Split a V4SI initialization. */
7783 rs6000_split_v4si_init (rtx operands
[])
7785 rtx dest
= operands
[0];
7787 /* Destination is a GPR, build up the two DImode parts in place. */
7788 if (REG_P (dest
) || SUBREG_P (dest
))
7790 int d_regno
= regno_or_subregno (dest
);
7791 rtx scalar1
= operands
[1];
7792 rtx scalar2
= operands
[2];
7793 rtx scalar3
= operands
[3];
7794 rtx scalar4
= operands
[4];
7795 rtx tmp1
= operands
[5];
7796 rtx tmp2
= operands
[6];
      /* Even though we only need one temporary (plus the destination, which
	 has an early clobber constraint), try to use two temporaries, one for
	 each double word created.  That way the 2nd insn scheduling pass can
	 rearrange things so the two parts are done in parallel.  */
7802 if (BYTES_BIG_ENDIAN
)
7804 rtx di_lo
= gen_rtx_REG (DImode
, d_regno
);
7805 rtx di_hi
= gen_rtx_REG (DImode
, d_regno
+ 1);
7806 rs6000_split_v4si_init_di_reg (di_lo
, scalar1
, scalar2
, tmp1
);
7807 rs6000_split_v4si_init_di_reg (di_hi
, scalar3
, scalar4
, tmp2
);
7811 rtx di_lo
= gen_rtx_REG (DImode
, d_regno
+ 1);
7812 rtx di_hi
= gen_rtx_REG (DImode
, d_regno
);
7813 gcc_assert (!VECTOR_ELT_ORDER_BIG
);
7814 rs6000_split_v4si_init_di_reg (di_lo
, scalar4
, scalar3
, tmp1
);
7815 rs6000_split_v4si_init_di_reg (di_hi
, scalar2
, scalar1
, tmp2
);
/* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
   selects whether the alignment is ABI mandated, optional, or
   both ABI and optional alignment.  */

static unsigned int
rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
{
  if (how != align_opt)
    {
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
	    {
	      if (align < 64)
		align = 64;
	    }
	  else if (align < 128)
	    align = 128;
	}
    }

  if (how != align_abi)
    {
      if (TREE_CODE (type) == ARRAY_TYPE
	  && TYPE_MODE (TREE_TYPE (type)) == QImode)
	{
	  if (align < BITS_PER_WORD)
	    align = BITS_PER_WORD;
	}
    }

  return align;
}
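/* A rough, host-side sketch of the two adjustments above (vector types are
   raised to at least 128 bits, QImode arrays to at least a word); the
   BITS_PER_WORD value of 64 below is an assumption for a 64-bit target and
   this code is for exposition only.  */
#if 0
#include <stdio.h>

static unsigned int
data_alignment_sketch (int is_vector, int is_char_array, unsigned int align)
{
  const unsigned int bits_per_word = 64;	/* assumed 64-bit target */

  if (is_vector && align < 128)
    align = 128;

  if (is_char_array && align < bits_per_word)
    align = bits_per_word;

  return align;
}

int
main (void)
{
  printf ("%u %u\n",
	  data_alignment_sketch (1, 0, 32),	/* prints 128 */
	  data_alignment_sketch (0, 1, 8));	/* prints 64 */
  return 0;
}
#endif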
/* Previous GCC releases forced all vector types to have 16-byte alignment.  */

bool
rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
{
  if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
    {
      if (computed != 128)
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the layout of aggregates containing vectors with"
		      " %d-byte alignment has changed in GCC 5",
		      computed / BITS_PER_UNIT);
	    }
	}
      /* In current GCC there is no special case.  */
      return false;
    }

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non-field decls.  */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
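/* For illustration only: under the AIX rule above, a record whose first
   field is a double gets its record alignment raised to at least 64 bits,
   while a record whose first field is an int does not.  The host-compiled
   values below only approximate the target layout and are not used by the
   compiler.  */
#if 0
#include <stdio.h>

struct dbl_first { double d; int i; };	/* first field is an FP double */
struct int_first { int i; double d; };	/* first field is not */

int
main (void)
{
  /* Expect 8 for dbl_first (MAX (align, 64) bits) and word alignment for
     int_first on an ABI that follows the AIX rule.  */
  printf ("%zu %zu\n", _Alignof (struct dbl_first),
	  _Alignof (struct int_first));
  return 0;
}
#endif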
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non-field decls.  */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
	return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
	return true;
    }

  return false;
}
/* Return true if the OFFSET is valid for the quad address instructions that
   use d-form (register + offset) addressing.  */

static inline bool
quad_address_offset_p (HOST_WIDE_INT offset)
{
  return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
}
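/* A rough illustration of the DQ-form offset test above, using plain C
   types (exposition only, not used by the compiler): the offset must fit a
   signed 16-bit field and be a multiple of 16.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool
dq_offset_ok (long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}

int
main (void)
{
  /* 32752 passes; 32760 fails the alignment check; 32768 fails the range
     check.  */
  printf ("%d %d %d\n", dq_offset_ok (32752), dq_offset_ok (32760),
	  dq_offset_ok (32768));	/* prints: 1 0 0 */
  return 0;
}
#endif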
/* Return true if the ADDR is an acceptable address for a quad memory
   operation of mode MODE (either LQ/STQ for general purpose registers, or
   LXV/STXV for vector registers under ISA 3.0).  STRICT controls whether
   the base register check uses the strict (post-reload) rules.  */

bool
quad_address_p (rtx addr, machine_mode mode, bool strict)
{
  rtx op0, op1;

  if (GET_MODE_SIZE (mode) != 16)
    return false;

  if (legitimate_indirect_address_p (addr, strict))
    return true;

  if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
    return false;

  if (GET_CODE (addr) != PLUS)
    return false;

  op0 = XEXP (addr, 0);
  if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
    return false;

  op1 = XEXP (addr, 1);
  if (!CONST_INT_P (op1))
    return false;

  return quad_address_offset_p (INTVAL (op1));
}
/* Return true if this is a load or store quad operation.  This function does
   not handle the atomic quad memory instructions.  */

bool
quad_load_store_p (rtx op0, rtx op1)
{
  bool ret;

  if (!TARGET_QUAD_MEMORY)
    ret = false;

  else if (REG_P (op0) && MEM_P (op1))
    ret = (quad_int_reg_operand (op0, GET_MODE (op0))
	   && quad_memory_operand (op1, GET_MODE (op1))
	   && !reg_overlap_mentioned_p (op0, op1));

  else if (MEM_P (op0) && REG_P (op1))
    ret = (quad_memory_operand (op0, GET_MODE (op0))
	   && quad_int_reg_operand (op1, GET_MODE (op1)));

  else
    ret = false;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\n========== quad_load_store, return %s\n",
	       ret ? "true" : "false");
      debug_rtx (gen_rtx_SET (op0, op1));
    }

  return ret;
}
/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return 0;
}
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
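/* Two small worked examples of the arithmetic above, in plain C (exposition
   only): the xor/subtract idiom sign-extends the low 16 bits, and the final
   comparison checks that the whole (possibly multi-word) access stays inside
   a signed 16-bit displacement.  */
#if 0
#include <stdio.h>

/* Sign-extend the low 16 bits of OFFSET, as done for lo_sum addresses.  */
static long
sext16 (unsigned long offset)
{
  return ((offset & 0xffff) ^ 0x8000) - 0x8000;
}

int
main (void)
{
  printf ("%ld %ld\n", sext16 (0x12348), sext16 (0x1fff8));  /* 9032, -8 */

  /* A DImode (8-byte) access on a 64-bit target has extra == 0, so an
     offset of 32760 passes the final test; a 16-byte access (extra == 8)
     with the same offset would not.  */
  unsigned long offset = 32760;
  printf ("%d %d\n", offset + 0x8000 < 0x10000u - 0,
	  offset + 0x8000 < 0x10000u - 8);	/* prints: 1 0 */
  return 0;
}
#endif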
/* As above, but for DS-FORM VSX insns.  Unlike mem_operand_gpr,
   enforce an offset divisible by 4 even for 32-bit.  */

bool
mem_operand_ds_form (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  if (!offsettable_address_p (false, mode, addr))
    return false;

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if ((offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
8212 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8215 reg_offset_addressing_ok_p (machine_mode mode
)
8229 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8230 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8231 a vector mode, if we want to use the VSX registers to move it around,
8232 we need to restrict ourselves to reg+reg addressing. Similarly for
8233 IEEE 128-bit floating point that is passed in a single vector
8235 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
))
8236 return mode_supports_vsx_dform_quad (mode
);
8241 /* Paired vector modes. Only reg+reg addressing is valid. */
8242 if (TARGET_PAIRED_FLOAT
)
8247 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8248 addressing for the LFIWZX and STFIWX instructions. */
8249 if (TARGET_NO_SDMODE_STACK
)
8261 virtual_stack_registers_memory_p (rtx op
)
8265 if (GET_CODE (op
) == REG
)
8266 regnum
= REGNO (op
);
8268 else if (GET_CODE (op
) == PLUS
8269 && GET_CODE (XEXP (op
, 0)) == REG
8270 && GET_CODE (XEXP (op
, 1)) == CONST_INT
)
8271 regnum
= REGNO (XEXP (op
, 0));
8276 return (regnum
>= FIRST_VIRTUAL_REGISTER
8277 && regnum
<= LAST_VIRTUAL_POINTER_REGISTER
);
/* Return true if a MODE sized memory access to OP plus OFFSET
   is known to not straddle a 32k boundary.  This function is used
   to determine whether -mcmodel=medium code can use TOC pointer
   relative addressing for OP.  This means the alignment of the TOC
   pointer must also be taken into account, and unfortunately that is
   only 8 bytes.  */

#ifndef POWERPC64_TOC_POINTER_ALIGNMENT
#define POWERPC64_TOC_POINTER_ALIGNMENT 8
#endif
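/* A simplified, host-side model of the straddle test used below (exposition
   only): the access cannot cross a 32k boundary when the alignment still
   known after adding OFFSET is at least the access size, and the idiom
   offset & -offset isolates the lowest set bit of the offset, which bounds
   the surviving alignment.  POWERPC64_TOC_POINTER_ALIGNMENT caps the known
   alignment at 8 bytes.  */
#if 0
#include <stdio.h>

static int
no_straddle_sketch (unsigned long dalign, unsigned long dsize,
		    unsigned long offset)
{
  if (dalign > 8)
    dalign = 8;				/* TOC pointer alignment cap */
  if (offset != 0)
    {
      unsigned long lsb = offset & -offset;	/* lowest set bit */
      if (lsb < dalign)
	dalign = lsb;
    }
  return dalign >= dsize;
}

int
main (void)
{
  printf ("%d %d\n",
	  no_straddle_sketch (8, 8, 16),	/* 1: still 8-byte aligned */
	  no_straddle_sketch (8, 8, 4));	/* 0: only 4-byte aligned */
  return 0;
}
#endif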
8292 offsettable_ok_by_alignment (rtx op
, HOST_WIDE_INT offset
,
8296 unsigned HOST_WIDE_INT dsize
, dalign
, lsb
, mask
;
8298 if (GET_CODE (op
) != SYMBOL_REF
)
8301 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8303 if (mode_supports_vsx_dform_quad (mode
))
8306 dsize
= GET_MODE_SIZE (mode
);
8307 decl
= SYMBOL_REF_DECL (op
);
8313 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8314 replacing memory addresses with an anchor plus offset. We
8315 could find the decl by rummaging around in the block->objects
8316 VEC for the given offset but that seems like too much work. */
8317 dalign
= BITS_PER_UNIT
;
8318 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op
)
8319 && SYMBOL_REF_ANCHOR_P (op
)
8320 && SYMBOL_REF_BLOCK (op
) != NULL
)
8322 struct object_block
*block
= SYMBOL_REF_BLOCK (op
);
8324 dalign
= block
->alignment
;
8325 offset
+= SYMBOL_REF_BLOCK_OFFSET (op
);
8327 else if (CONSTANT_POOL_ADDRESS_P (op
))
8329 /* It would be nice to have get_pool_align().. */
8330 machine_mode cmode
= get_pool_mode (op
);
8332 dalign
= GET_MODE_ALIGNMENT (cmode
);
8335 else if (DECL_P (decl
))
8337 dalign
= DECL_ALIGN (decl
);
8341 /* Allow BLKmode when the entire object is known to not
8342 cross a 32k boundary. */
8343 if (!DECL_SIZE_UNIT (decl
))
8346 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl
)))
8349 dsize
= tree_to_uhwi (DECL_SIZE_UNIT (decl
));
8353 dalign
/= BITS_PER_UNIT
;
8354 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
8355 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
8356 return dalign
>= dsize
;
8362 /* Find how many bits of the alignment we know for this access. */
8363 dalign
/= BITS_PER_UNIT
;
8364 if (dalign
> POWERPC64_TOC_POINTER_ALIGNMENT
)
8365 dalign
= POWERPC64_TOC_POINTER_ALIGNMENT
;
8367 lsb
= offset
& -offset
;
8371 return dalign
>= dsize
;
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}
8385 /* These are only used to pass through from print_operand/print_operand_address
8386 to rs6000_output_addr_const_extra over the intervening function
8387 output_addr_const which is not target code. */
8388 static const_rtx tocrel_base_oac
, tocrel_offset_oac
;
8390 /* Return true if OP is a toc pointer relative address (the output
8391 of create_TOC_reference). If STRICT, do not match non-split
8392 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8393 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8394 TOCREL_OFFSET_RET respectively. */
8397 toc_relative_expr_p (const_rtx op
, bool strict
, const_rtx
*tocrel_base_ret
,
8398 const_rtx
*tocrel_offset_ret
)
8403 if (TARGET_CMODEL
!= CMODEL_SMALL
)
8405 /* When strict ensure we have everything tidy. */
8407 && !(GET_CODE (op
) == LO_SUM
8408 && REG_P (XEXP (op
, 0))
8409 && INT_REG_OK_FOR_BASE_P (XEXP (op
, 0), strict
)))
8412 /* When not strict, allow non-split TOC addresses and also allow
8413 (lo_sum (high ..)) TOC addresses created during reload. */
8414 if (GET_CODE (op
) == LO_SUM
)
8418 const_rtx tocrel_base
= op
;
8419 const_rtx tocrel_offset
= const0_rtx
;
8421 if (GET_CODE (op
) == PLUS
&& add_cint_operand (XEXP (op
, 1), GET_MODE (op
)))
8423 tocrel_base
= XEXP (op
, 0);
8424 tocrel_offset
= XEXP (op
, 1);
8427 if (tocrel_base_ret
)
8428 *tocrel_base_ret
= tocrel_base
;
8429 if (tocrel_offset_ret
)
8430 *tocrel_offset_ret
= tocrel_offset
;
8432 return (GET_CODE (tocrel_base
) == UNSPEC
8433 && XINT (tocrel_base
, 1) == UNSPEC_TOCREL
);
8436 /* Return true if X is a constant pool address, and also for cmodel=medium
8437 if X is a toc-relative address known to be offsettable within MODE. */
8440 legitimate_constant_pool_address_p (const_rtx x
, machine_mode mode
,
8443 const_rtx tocrel_base
, tocrel_offset
;
8444 return (toc_relative_expr_p (x
, strict
, &tocrel_base
, &tocrel_offset
)
8445 && (TARGET_CMODEL
!= CMODEL_MEDIUM
8446 || constant_pool_expr_p (XVECEXP (tocrel_base
, 0, 0))
8448 || offsettable_ok_by_alignment (XVECEXP (tocrel_base
, 0, 0),
8449 INTVAL (tocrel_offset
), mode
)));
8453 legitimate_small_data_p (machine_mode mode
, rtx x
)
8455 return (DEFAULT_ABI
== ABI_V4
8456 && !flag_pic
&& !TARGET_TOC
8457 && (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
)
8458 && small_data_operand (x
, mode
));
8462 rs6000_legitimate_offset_address_p (machine_mode mode
, rtx x
,
8463 bool strict
, bool worst_case
)
8465 unsigned HOST_WIDE_INT offset
;
8468 if (GET_CODE (x
) != PLUS
)
8470 if (!REG_P (XEXP (x
, 0)))
8472 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8474 if (mode_supports_vsx_dform_quad (mode
))
8475 return quad_address_p (x
, mode
, strict
);
8476 if (!reg_offset_addressing_ok_p (mode
))
8477 return virtual_stack_registers_memory_p (x
);
8478 if (legitimate_constant_pool_address_p (x
, mode
, strict
|| lra_in_progress
))
8480 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
8483 offset
= INTVAL (XEXP (x
, 1));
8489 /* Paired single modes: offset addressing isn't valid. */
8495 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8497 if (VECTOR_MEM_VSX_P (mode
))
8502 if (!TARGET_POWERPC64
)
8504 else if (offset
& 3)
8517 if (!TARGET_POWERPC64
)
8519 else if (offset
& 3)
8528 return offset
< 0x10000 - extra
;
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
static bool
avoiding_indexed_address_p (machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}

bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
8564 macho_lo_sum_memory_operand (rtx x
, machine_mode mode
)
8566 if (!TARGET_MACHO
|| !flag_pic
8567 || mode
!= SImode
|| GET_CODE (x
) != MEM
)
8571 if (GET_CODE (x
) != LO_SUM
)
8573 if (GET_CODE (XEXP (x
, 0)) != REG
)
8575 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 0))
8579 return CONSTANT_P (x
);
8583 legitimate_lo_sum_address_p (machine_mode mode
, rtx x
, int strict
)
8585 if (GET_CODE (x
) != LO_SUM
)
8587 if (GET_CODE (XEXP (x
, 0)) != REG
)
8589 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
8591 /* quad word addresses are restricted, and we can't use LO_SUM. */
8592 if (mode_supports_vsx_dform_quad (mode
))
8596 if (TARGET_ELF
|| TARGET_MACHO
)
8600 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
)
8602 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8603 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8604 recognizes some LO_SUM addresses as valid although this
8605 function says opposite. In most cases, LRA through different
8606 transformations can generate correct code for address reloads.
8607 It can not manage only some LO_SUM cases. So we need to add
8608 code analogous to one in rs6000_legitimize_reload_address for
8609 LOW_SUM here saying that some addresses are still valid. */
8610 large_toc_ok
= (lra_in_progress
&& TARGET_CMODEL
!= CMODEL_SMALL
8611 && small_toc_ref (x
, VOIDmode
));
8612 if (TARGET_TOC
&& ! large_toc_ok
)
8614 if (GET_MODE_NUNITS (mode
) != 1)
8616 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
8617 && !(/* ??? Assume floating point reg based on mode? */
8618 TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
8619 && (mode
== DFmode
|| mode
== DDmode
)))
8622 return CONSTANT_P (x
) || large_toc_ok
;
8629 /* Try machine-dependent ways of modifying an illegitimate address
8630 to be legitimate. If we find one, return the new, valid address.
8631 This is used from only one place: `memory_address' in explow.c.
8633 OLDX is the address as it was before break_out_memory_refs was
8634 called. In some cases it is useful to look at this to decide what
8637 It is always safe for this function to do nothing. It exists to
8638 recognize opportunities to optimize the output.
8640 On RS/6000, first check for the sum of a register with a constant
8641 integer that is out of range. If so, generate code to add the
8642 constant with the low-order 16 bits masked to the register and force
8643 this result into another register (this can be done with `cau').
8644 Then generate an address of REG+(CONST&0xffff), allowing for the
8645 possibility of bit 16 being a one.
8647 Then check for the sum of a register and something not constant, try to
8648 load the other things into a register and return the sum. */
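/* A worked example of the high/low split described above (exposition only,
   plain C): the constant 0x12348 is too large for a 16-bit signed
   displacement, so it is split into a high part that an addis (cau) adds to
   the register and a low part that stays in the memory insn.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long val = 0x12348;
  long low_int = ((val & 0xffff) ^ 0x8000) - 0x8000;	/* sign-extended low */
  long high_int = val - low_int;

  printf ("high = 0x%lx, low = 0x%lx\n", high_int, low_int);
  /* high = 0x10000, low = 0x2348, and 0x10000 + 0x2348 == 0x12348.  */
  return 0;
}
#endif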
8651 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
8656 if (!reg_offset_addressing_ok_p (mode
)
8657 || mode_supports_vsx_dform_quad (mode
))
8659 if (virtual_stack_registers_memory_p (x
))
8662 /* In theory we should not be seeing addresses of the form reg+0,
8663 but just in case it is generated, optimize it away. */
8664 if (GET_CODE (x
) == PLUS
&& XEXP (x
, 1) == const0_rtx
)
8665 return force_reg (Pmode
, XEXP (x
, 0));
8667 /* For TImode with load/store quad, restrict addresses to just a single
8668 pointer, so it works with both GPRs and VSX registers. */
8669 /* Make sure both operands are registers. */
8670 else if (GET_CODE (x
) == PLUS
8671 && (mode
!= TImode
|| !TARGET_VSX
))
8672 return gen_rtx_PLUS (Pmode
,
8673 force_reg (Pmode
, XEXP (x
, 0)),
8674 force_reg (Pmode
, XEXP (x
, 1)));
8676 return force_reg (Pmode
, x
);
8678 if (GET_CODE (x
) == SYMBOL_REF
)
8680 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
8682 return rs6000_legitimize_tls_address (x
, model
);
8694 /* As in legitimate_offset_address_p we do not assume
8695 worst-case. The mode here is just a hint as to the registers
8696 used. A TImode is usually in gprs, but may actually be in
8697 fprs. Leave worst-case scenario for reload to handle via
8698 insn constraints. PTImode is only GPRs. */
8705 if (GET_CODE (x
) == PLUS
8706 && GET_CODE (XEXP (x
, 0)) == REG
8707 && GET_CODE (XEXP (x
, 1)) == CONST_INT
8708 && ((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000)
8710 && !PAIRED_VECTOR_MODE (mode
))
8712 HOST_WIDE_INT high_int
, low_int
;
8714 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8715 if (low_int
>= 0x8000 - extra
)
8717 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
8718 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8719 GEN_INT (high_int
)), 0);
8720 return plus_constant (Pmode
, sum
, low_int
);
8722 else if (GET_CODE (x
) == PLUS
8723 && GET_CODE (XEXP (x
, 0)) == REG
8724 && GET_CODE (XEXP (x
, 1)) != CONST_INT
8725 && GET_MODE_NUNITS (mode
) == 1
8726 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8727 || (/* ??? Assume floating point reg based on mode? */
8728 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8729 && (mode
== DFmode
|| mode
== DDmode
)))
8730 && !avoiding_indexed_address_p (mode
))
8732 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
8733 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
8735 else if (PAIRED_VECTOR_MODE (mode
))
8739 /* We accept [reg + reg]. */
8741 if (GET_CODE (x
) == PLUS
)
8743 rtx op1
= XEXP (x
, 0);
8744 rtx op2
= XEXP (x
, 1);
8747 op1
= force_reg (Pmode
, op1
);
8748 op2
= force_reg (Pmode
, op2
);
8750 /* We can't always do [reg + reg] for these, because [reg +
8751 reg + offset] is not a legitimate addressing mode. */
8752 y
= gen_rtx_PLUS (Pmode
, op1
, op2
);
8754 if ((GET_MODE_SIZE (mode
) > 8 || mode
== DDmode
) && REG_P (op2
))
8755 return force_reg (Pmode
, y
);
8760 return force_reg (Pmode
, x
);
8762 else if ((TARGET_ELF
8764 || !MACHO_DYNAMIC_NO_PIC_P
8770 && GET_CODE (x
) != CONST_INT
8771 && GET_CODE (x
) != CONST_WIDE_INT
8772 && GET_CODE (x
) != CONST_DOUBLE
8774 && GET_MODE_NUNITS (mode
) == 1
8775 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
8776 || (/* ??? Assume floating point reg based on mode? */
8777 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
8778 && (mode
== DFmode
|| mode
== DDmode
))))
8780 rtx reg
= gen_reg_rtx (Pmode
);
8782 emit_insn (gen_elf_high (reg
, x
));
8784 emit_insn (gen_macho_high (reg
, x
));
8785 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
8788 && GET_CODE (x
) == SYMBOL_REF
8789 && constant_pool_expr_p (x
)
8790 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
8791 return create_TOC_reference (x
, NULL_RTX
);
8796 /* Debug version of rs6000_legitimize_address. */
8798 rs6000_debug_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
)
8804 ret
= rs6000_legitimize_address (x
, oldx
, mode
);
8805 insns
= get_insns ();
8811 "\nrs6000_legitimize_address: mode %s, old code %s, "
8812 "new code %s, modified\n",
8813 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)),
8814 GET_RTX_NAME (GET_CODE (ret
)));
8816 fprintf (stderr
, "Original address:\n");
8819 fprintf (stderr
, "oldx:\n");
8822 fprintf (stderr
, "New address:\n");
8827 fprintf (stderr
, "Insns added:\n");
8828 debug_rtx_list (insns
, 20);
8834 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8835 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)));
8846 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8847 We need to emit DTP-relative relocations. */
8849 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
8851 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
8856 fputs ("\t.long\t", file
);
8859 fputs (DOUBLE_INT_ASM_OP
, file
);
8864 output_addr_const (file
, x
);
8866 fputs ("@dtprel+0x8000", file
);
8867 else if (TARGET_XCOFF
&& GET_CODE (x
) == SYMBOL_REF
)
8869 switch (SYMBOL_REF_TLS_MODEL (x
))
8873 case TLS_MODEL_LOCAL_EXEC
:
8874 fputs ("@le", file
);
8876 case TLS_MODEL_INITIAL_EXEC
:
8877 fputs ("@ie", file
);
8879 case TLS_MODEL_GLOBAL_DYNAMIC
:
8880 case TLS_MODEL_LOCAL_DYNAMIC
:
8889 /* Return true if X is a symbol that refers to real (rather than emulated)
8893 rs6000_real_tls_symbol_ref_p (rtx x
)
8895 return (GET_CODE (x
) == SYMBOL_REF
8896 && SYMBOL_REF_TLS_MODEL (x
) >= TLS_MODEL_REAL
);
8899 /* In the name of slightly smaller debug output, and to cater to
8900 general assembler lossage, recognize various UNSPEC sequences
8901 and turn them back into a direct symbol reference. */
8904 rs6000_delegitimize_address (rtx orig_x
)
8908 orig_x
= delegitimize_mem_from_attrs (orig_x
);
8914 if (TARGET_CMODEL
!= CMODEL_SMALL
8915 && GET_CODE (y
) == LO_SUM
)
8919 if (GET_CODE (y
) == PLUS
8920 && GET_MODE (y
) == Pmode
8921 && CONST_INT_P (XEXP (y
, 1)))
8923 offset
= XEXP (y
, 1);
8927 if (GET_CODE (y
) == UNSPEC
8928 && XINT (y
, 1) == UNSPEC_TOCREL
)
8930 y
= XVECEXP (y
, 0, 0);
8933 /* Do not associate thread-local symbols with the original
8934 constant pool symbol. */
8936 && GET_CODE (y
) == SYMBOL_REF
8937 && CONSTANT_POOL_ADDRESS_P (y
)
8938 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y
)))
8942 if (offset
!= NULL_RTX
)
8943 y
= gen_rtx_PLUS (Pmode
, y
, offset
);
8944 if (!MEM_P (orig_x
))
8947 return replace_equiv_address_nv (orig_x
, y
);
8951 && GET_CODE (orig_x
) == LO_SUM
8952 && GET_CODE (XEXP (orig_x
, 1)) == CONST
)
8954 y
= XEXP (XEXP (orig_x
, 1), 0);
8955 if (GET_CODE (y
) == UNSPEC
8956 && XINT (y
, 1) == UNSPEC_MACHOPIC_OFFSET
)
8957 return XVECEXP (y
, 0, 0);
8963 /* Return true if X shouldn't be emitted into the debug info.
8964 The linker doesn't like .toc section references from
8965 .debug_* sections, so reject .toc section symbols. */
8968 rs6000_const_not_ok_for_debug_p (rtx x
)
8970 if (GET_CODE (x
) == SYMBOL_REF
8971 && CONSTANT_POOL_ADDRESS_P (x
))
8973 rtx c
= get_pool_constant (x
);
8974 machine_mode cmode
= get_pool_mode (x
);
8975 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c
, cmode
))
8983 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8986 rs6000_legitimate_combined_insn (rtx_insn
*insn
)
8988 int icode
= INSN_CODE (insn
);
  /* Reject creating doloop insns.  Combine should not be allowed
     to create these for a number of reasons:
     1) In a nested loop, if combine creates one of these in an
     outer loop and the register allocator happens to allocate ctr
     to the outer loop insn, then the inner loop can't use ctr.
     Inner loops ought to be more highly optimized.
     2) Combine often wants to create one of these from what was
     originally a three insn sequence, first combining the three
     insns to two, then to ctrsi/ctrdi.  When ctrsi/ctrdi is not
     allocated ctr, the splitter takes us back to the three insn
     sequence.  It's better to stop combine at the two insn
     sequence.
     3) Faced with not being able to allocate ctr for ctrsi/ctrdi
     insns, the register allocator sometimes uses floating point
     or vector registers for the pseudo.  Since ctrsi/ctrdi is a
     jump insn and output reloads are not implemented for jumps,
     the ctrsi/ctrdi splitters need to handle all possible cases.
     That's a pain, and it gets to be seriously difficult when a
     splitter that runs after reload needs memory to transfer from
     a gpr to fpr.  See PR70098 and PR71763 which are not fixed
     for the difficult case.  It's better to not create problems
     in the first place.  */
9012 if (icode
!= CODE_FOR_nothing
9013 && (icode
== CODE_FOR_ctrsi_internal1
9014 || icode
== CODE_FOR_ctrdi_internal1
9015 || icode
== CODE_FOR_ctrsi_internal2
9016 || icode
== CODE_FOR_ctrdi_internal2
9017 || icode
== CODE_FOR_ctrsi_internal3
9018 || icode
== CODE_FOR_ctrdi_internal3
9019 || icode
== CODE_FOR_ctrsi_internal4
9020 || icode
== CODE_FOR_ctrdi_internal4
))
9026 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9028 static GTY(()) rtx rs6000_tls_symbol
;
9030 rs6000_tls_get_addr (void)
9032 if (!rs6000_tls_symbol
)
9033 rs6000_tls_symbol
= init_one_libfunc ("__tls_get_addr");
9035 return rs6000_tls_symbol
;
9038 /* Construct the SYMBOL_REF for TLS GOT references. */
9040 static GTY(()) rtx rs6000_got_symbol
;
9042 rs6000_got_sym (void)
9044 if (!rs6000_got_symbol
)
9046 rs6000_got_symbol
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
9047 SYMBOL_REF_FLAGS (rs6000_got_symbol
) |= SYMBOL_FLAG_LOCAL
;
9048 SYMBOL_REF_FLAGS (rs6000_got_symbol
) |= SYMBOL_FLAG_EXTERNAL
;
9051 return rs6000_got_symbol
;
9054 /* AIX Thread-Local Address support. */
9057 rs6000_legitimize_tls_address_aix (rtx addr
, enum tls_model model
)
9059 rtx sym
, mem
, tocref
, tlsreg
, tmpreg
, dest
, tlsaddr
;
9063 name
= XSTR (addr
, 0);
9064 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9065 or the symbol will be in TLS private data section. */
9066 if (name
[strlen (name
) - 1] != ']'
9067 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr
))
9068 || bss_initializer_p (SYMBOL_REF_DECL (addr
))))
9070 tlsname
= XALLOCAVEC (char, strlen (name
) + 4);
9071 strcpy (tlsname
, name
);
9073 bss_initializer_p (SYMBOL_REF_DECL (addr
)) ? "[UL]" : "[TL]");
9074 tlsaddr
= copy_rtx (addr
);
9075 XSTR (tlsaddr
, 0) = ggc_strdup (tlsname
);
9080 /* Place addr into TOC constant pool. */
9081 sym
= force_const_mem (GET_MODE (tlsaddr
), tlsaddr
);
9083 /* Output the TOC entry and create the MEM referencing the value. */
9084 if (constant_pool_expr_p (XEXP (sym
, 0))
9085 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym
, 0)), Pmode
))
9087 tocref
= create_TOC_reference (XEXP (sym
, 0), NULL_RTX
);
9088 mem
= gen_const_mem (Pmode
, tocref
);
9089 set_mem_alias_set (mem
, get_TOC_alias_set ());
9094 /* Use global-dynamic for local-dynamic. */
9095 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
9096 || model
== TLS_MODEL_LOCAL_DYNAMIC
)
9098 /* Create new TOC reference for @m symbol. */
9099 name
= XSTR (XVECEXP (XEXP (mem
, 0), 0, 0), 0);
9100 tlsname
= XALLOCAVEC (char, strlen (name
) + 1);
9101 strcpy (tlsname
, "*LCM");
9102 strcat (tlsname
, name
+ 3);
9103 rtx modaddr
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (tlsname
));
9104 SYMBOL_REF_FLAGS (modaddr
) |= SYMBOL_FLAG_LOCAL
;
9105 tocref
= create_TOC_reference (modaddr
, NULL_RTX
);
9106 rtx modmem
= gen_const_mem (Pmode
, tocref
);
9107 set_mem_alias_set (modmem
, get_TOC_alias_set ());
9109 rtx modreg
= gen_reg_rtx (Pmode
);
9110 emit_insn (gen_rtx_SET (modreg
, modmem
));
9112 tmpreg
= gen_reg_rtx (Pmode
);
9113 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9115 dest
= gen_reg_rtx (Pmode
);
9117 emit_insn (gen_tls_get_addrsi (dest
, modreg
, tmpreg
));
9119 emit_insn (gen_tls_get_addrdi (dest
, modreg
, tmpreg
));
9122 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9123 else if (TARGET_32BIT
)
9125 tlsreg
= gen_reg_rtx (SImode
);
9126 emit_insn (gen_tls_get_tpointer (tlsreg
));
9129 tlsreg
= gen_rtx_REG (DImode
, 13);
9131 /* Load the TOC value into temporary register. */
9132 tmpreg
= gen_reg_rtx (Pmode
);
9133 emit_insn (gen_rtx_SET (tmpreg
, mem
));
9134 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
9135 gen_rtx_MINUS (Pmode
, addr
, tlsreg
));
9137 /* Add TOC symbol value to TLS pointer. */
9138 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, tmpreg
, tlsreg
));
9143 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9144 this (thread-local) address. */
9147 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
9152 return rs6000_legitimize_tls_address_aix (addr
, model
);
9154 dest
= gen_reg_rtx (Pmode
);
9155 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
9161 tlsreg
= gen_rtx_REG (Pmode
, 13);
9162 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
9166 tlsreg
= gen_rtx_REG (Pmode
, 2);
9167 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
9171 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
9175 tmp
= gen_reg_rtx (Pmode
);
9178 tlsreg
= gen_rtx_REG (Pmode
, 13);
9179 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
9183 tlsreg
= gen_rtx_REG (Pmode
, 2);
9184 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
9188 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
9190 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
9195 rtx r3
, got
, tga
, tmp1
, tmp2
, call_insn
;
9197 /* We currently use relocations like @got@tlsgd for tls, which
9198 means the linker will handle allocation of tls entries, placing
9199 them in the .got section. So use a pointer to the .got section,
9200 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9201 or to secondary GOT sections used by 32-bit -fPIC. */
9203 got
= gen_rtx_REG (Pmode
, 2);
9207 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
9210 rtx gsym
= rs6000_got_sym ();
9211 got
= gen_reg_rtx (Pmode
);
9213 rs6000_emit_move (got
, gsym
, Pmode
);
9218 tmp1
= gen_reg_rtx (Pmode
);
9219 tmp2
= gen_reg_rtx (Pmode
);
9220 mem
= gen_const_mem (Pmode
, tmp1
);
9221 lab
= gen_label_rtx ();
9222 emit_insn (gen_load_toc_v4_PIC_1b (gsym
, lab
));
9223 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
9224 if (TARGET_LINK_STACK
)
9225 emit_insn (gen_addsi3 (tmp1
, tmp1
, GEN_INT (4)));
9226 emit_move_insn (tmp2
, mem
);
9227 rtx_insn
*last
= emit_insn (gen_addsi3 (got
, tmp1
, tmp2
));
9228 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
9233 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
9235 tga
= rs6000_tls_get_addr ();
9236 emit_library_call_value (tga
, dest
, LCT_CONST
, Pmode
,
9237 1, const0_rtx
, Pmode
);
9239 r3
= gen_rtx_REG (Pmode
, 3);
9240 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9243 insn
= gen_tls_gd_aix64 (r3
, got
, addr
, tga
, const0_rtx
);
9245 insn
= gen_tls_gd_aix32 (r3
, got
, addr
, tga
, const0_rtx
);
9247 else if (DEFAULT_ABI
== ABI_V4
)
9248 insn
= gen_tls_gd_sysvsi (r3
, got
, addr
, tga
, const0_rtx
);
9251 call_insn
= last_call_insn ();
9252 PATTERN (call_insn
) = insn
;
9253 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9254 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9255 pic_offset_table_rtx
);
9257 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
9259 tga
= rs6000_tls_get_addr ();
9260 tmp1
= gen_reg_rtx (Pmode
);
9261 emit_library_call_value (tga
, tmp1
, LCT_CONST
, Pmode
,
9262 1, const0_rtx
, Pmode
);
9264 r3
= gen_rtx_REG (Pmode
, 3);
9265 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9268 insn
= gen_tls_ld_aix64 (r3
, got
, tga
, const0_rtx
);
9270 insn
= gen_tls_ld_aix32 (r3
, got
, tga
, const0_rtx
);
9272 else if (DEFAULT_ABI
== ABI_V4
)
9273 insn
= gen_tls_ld_sysvsi (r3
, got
, tga
, const0_rtx
);
9276 call_insn
= last_call_insn ();
9277 PATTERN (call_insn
) = insn
;
9278 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
9279 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
9280 pic_offset_table_rtx
);
9282 if (rs6000_tls_size
== 16)
9285 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
9287 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
9289 else if (rs6000_tls_size
== 32)
9291 tmp2
= gen_reg_rtx (Pmode
);
9293 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
9295 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
9298 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
9300 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
9304 tmp2
= gen_reg_rtx (Pmode
);
9306 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
9308 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
9310 insn
= gen_rtx_SET (dest
, gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
9316 /* IE, or 64-bit offset LE. */
9317 tmp2
= gen_reg_rtx (Pmode
);
9319 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
9321 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
9324 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
9326 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
/* Only create the global variable for the stack protect guard if we are using
   the global flavor of that guard.  */
static tree
rs6000_init_stack_protect_guard (void)
{
  if (rs6000_stack_protector_guard == SSP_GLOBAL)
    return default_stack_protect_guard ();

  return NULL_TREE;
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  /* A TLS symbol in the TOC cannot contain a sum.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
    return true;

  /* Do not place an ELF TLS symbol in the constant pool.  */
  return TARGET_ELF && tls_referenced_p (x);
}
/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym, machine_mode mode)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && SYMBOL_REF_LOCAL_P (sym)
	      && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addis and the mem insn.
   This cuts the number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
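/* A rough sketch of the displacement split performed below (exposition
   only, assuming a 64-bit long long): the low 16 bits stay in the memory
   insn, the rest is reloaded into the base register, and the split is
   rejected when the high part cannot be represented in 32 bits.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long long val = 0x7fffffff;
  long long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  long long high = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

  printf ("high + low == val? %d\n", high + low == val);  /* 0: reject */

  val = 0x12348;
  low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  high = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  printf ("high + low == val? %d\n", high + low == val);  /* 1: split ok */
  return 0;
}
#endif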
9394 rs6000_legitimize_reload_address (rtx x
, machine_mode mode
,
9395 int opnum
, int type
,
9396 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
9398 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9399 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9401 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9402 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9405 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
9406 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)
9407 || (mode
== SFmode
&& recog_data
.operand_mode
[0] == V4SFmode
9408 && TARGET_P9_VECTOR
)
9409 || (mode
== SImode
&& recog_data
.operand_mode
[0] == V4SImode
9410 && TARGET_P9_VECTOR
)))
9411 reg_offset_p
= false;
9413 /* We must recognize output that we have already generated ourselves. */
9414 if (GET_CODE (x
) == PLUS
9415 && GET_CODE (XEXP (x
, 0)) == PLUS
9416 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9417 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9418 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9420 if (TARGET_DEBUG_ADDR
)
9422 fprintf (stderr
, "\nlegitimize_reload_address push_reload #1:\n");
9425 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9426 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9427 opnum
, (enum reload_type
) type
);
9432 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9433 if (GET_CODE (x
) == LO_SUM
9434 && GET_CODE (XEXP (x
, 0)) == HIGH
)
9436 if (TARGET_DEBUG_ADDR
)
9438 fprintf (stderr
, "\nlegitimize_reload_address push_reload #2:\n");
9441 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9442 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9443 opnum
, (enum reload_type
) type
);
9449 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
9450 && GET_CODE (x
) == LO_SUM
9451 && GET_CODE (XEXP (x
, 0)) == PLUS
9452 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
9453 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
9454 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
9455 && machopic_operand_p (XEXP (x
, 1)))
9457 /* Result of previous invocation of this function on Darwin
9458 floating point constant. */
9459 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9460 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9461 opnum
, (enum reload_type
) type
);
9467 if (TARGET_CMODEL
!= CMODEL_SMALL
9470 && small_toc_ref (x
, VOIDmode
))
9472 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
9473 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
9474 if (TARGET_DEBUG_ADDR
)
9476 fprintf (stderr
, "\nlegitimize_reload_address push_reload #3:\n");
9479 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9480 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9481 opnum
, (enum reload_type
) type
);
9486 if (GET_CODE (x
) == PLUS
9487 && REG_P (XEXP (x
, 0))
9488 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
9489 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
9490 && CONST_INT_P (XEXP (x
, 1))
9492 && !PAIRED_VECTOR_MODE (mode
)
9493 && (quad_offset_p
|| !VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
)))
9495 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
9496 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
9498 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9500 /* Check for 32-bit overflow or quad addresses with one of the
9501 four least significant bits set. */
9502 if (high
+ low
!= val
9503 || (quad_offset_p
&& (low
& 0xf)))
9509 /* Reload the high part into a base reg; leave the low part
9510 in the mem directly. */
9512 x
= gen_rtx_PLUS (GET_MODE (x
),
9513 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
9517 if (TARGET_DEBUG_ADDR
)
9519 fprintf (stderr
, "\nlegitimize_reload_address push_reload #4:\n");
9522 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9523 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
9524 opnum
, (enum reload_type
) type
);
9529 if (GET_CODE (x
) == SYMBOL_REF
9532 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
))
9533 && !PAIRED_VECTOR_MODE (mode
)
9535 && DEFAULT_ABI
== ABI_DARWIN
9536 && (flag_pic
|| MACHO_DYNAMIC_NO_PIC_P
)
9537 && machopic_symbol_defined_p (x
)
9539 && DEFAULT_ABI
== ABI_V4
9542 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9543 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9545 ??? Assume floating point reg based on mode? This assumption is
9546 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9547 where reload ends up doing a DFmode load of a constant from
9548 mem using two gprs. Unfortunately, at this point reload
9549 hasn't yet selected regs so poking around in reload data
9550 won't help and even if we could figure out the regs reliably,
9551 we'd still want to allow this transformation when the mem is
9552 naturally aligned. Since we say the address is good here, we
9553 can't disable offsets from LO_SUMs in mem_operand_gpr.
9554 FIXME: Allow offset from lo_sum for other modes too, when
9555 mem is sufficiently aligned.
9557 Also disallow this if the type can go in VMX/Altivec registers, since
9558 those registers do not have d-form (reg+offset) address modes. */
9559 && !reg_addr
[mode
].scalar_in_vmx_p
9564 && (mode
!= TImode
|| !TARGET_VSX
)
9566 && (mode
!= DImode
|| TARGET_POWERPC64
)
9567 && ((mode
!= DFmode
&& mode
!= DDmode
) || TARGET_POWERPC64
9568 || (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)))
9573 rtx offset
= machopic_gen_offset (x
);
9574 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9575 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
9576 gen_rtx_HIGH (Pmode
, offset
)), offset
);
9580 x
= gen_rtx_LO_SUM (GET_MODE (x
),
9581 gen_rtx_HIGH (Pmode
, x
), x
);
9583 if (TARGET_DEBUG_ADDR
)
9585 fprintf (stderr
, "\nlegitimize_reload_address push_reload #5:\n");
9588 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9589 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9590 opnum
, (enum reload_type
) type
);
9595 /* Reload an offset address wrapped by an AND that represents the
9596 masking of the lower bits. Strip the outer AND and let reload
9597 convert the offset address into an indirect address. For VSX,
9598 force reload to create the address with an AND in a separate
9599 register, because we can't guarantee an altivec register will
9601 if (VECTOR_MEM_ALTIVEC_P (mode
)
9602 && GET_CODE (x
) == AND
9603 && GET_CODE (XEXP (x
, 0)) == PLUS
9604 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
9605 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
9606 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9607 && INTVAL (XEXP (x
, 1)) == -16)
9617 && GET_CODE (x
) == SYMBOL_REF
9618 && use_toc_relative_ref (x
, mode
))
9620 x
= create_TOC_reference (x
, NULL_RTX
);
9621 if (TARGET_CMODEL
!= CMODEL_SMALL
)
9623 if (TARGET_DEBUG_ADDR
)
9625 fprintf (stderr
, "\nlegitimize_reload_address push_reload #6:\n");
9628 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
9629 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
9630 opnum
, (enum reload_type
) type
);
9639 /* Debug version of rs6000_legitimize_reload_address. */
9641 rs6000_debug_legitimize_reload_address (rtx x
, machine_mode mode
,
9642 int opnum
, int type
,
9643 int ind_levels
, int *win
)
9645 rtx ret
= rs6000_legitimize_reload_address (x
, mode
, opnum
, type
,
9648 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9649 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9650 GET_MODE_NAME (mode
), opnum
, type
, ind_levels
, *win
);
9654 fprintf (stderr
, "Same address returned\n");
9656 fprintf (stderr
, "NULL returned\n");
9659 fprintf (stderr
, "New address:\n");
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
9684 rs6000_legitimate_address_p (machine_mode mode
, rtx x
, bool reg_ok_strict
)
9686 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
9687 bool quad_offset_p
= mode_supports_vsx_dform_quad (mode
);
9689 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9690 if (VECTOR_MEM_ALTIVEC_P (mode
)
9691 && GET_CODE (x
) == AND
9692 && GET_CODE (XEXP (x
, 1)) == CONST_INT
9693 && INTVAL (XEXP (x
, 1)) == -16)
9696 if (TARGET_ELF
&& RS6000_SYMBOL_REF_TLS_P (x
))
9698 if (legitimate_indirect_address_p (x
, reg_ok_strict
))
9701 && (GET_CODE (x
) == PRE_INC
|| GET_CODE (x
) == PRE_DEC
)
9702 && mode_supports_pre_incdec_p (mode
)
9703 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
))
9705 /* Handle restricted vector d-form offsets in ISA 3.0. */
9708 if (quad_address_p (x
, mode
, reg_ok_strict
))
9711 else if (virtual_stack_registers_memory_p (x
))
9714 else if (reg_offset_p
)
9716 if (legitimate_small_data_p (mode
, x
))
9718 if (legitimate_constant_pool_address_p (x
, mode
,
9719 reg_ok_strict
|| lra_in_progress
))
9721 if (reg_addr
[mode
].fused_toc
&& GET_CODE (x
) == UNSPEC
9722 && XINT (x
, 1) == UNSPEC_FUSION_ADDIS
)
9726 /* For TImode, if we have TImode in VSX registers, only allow register
9727 indirect addresses. This will allow the values to go in either GPRs
9728 or VSX registers without reloading. The vector types would tend to
9729 go into VSX registers, so we allow REG+REG, while TImode seems
9730 somewhat split, in that some uses are GPR based, and some VSX based. */
9731 /* FIXME: We could loosen this by changing the following to
9732 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9733 but currently we cannot allow REG+REG addressing for TImode. See
9734 PR72827 for complete details on how this ends up hoodwinking DSE. */
9735 if (mode
== TImode
&& TARGET_VSX
)
9737 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9740 && GET_CODE (x
) == PLUS
9741 && GET_CODE (XEXP (x
, 0)) == REG
9742 && (XEXP (x
, 0) == virtual_stack_vars_rtx
9743 || XEXP (x
, 0) == arg_pointer_rtx
)
9744 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
9746 if (rs6000_legitimate_offset_address_p (mode
, x
, reg_ok_strict
, false))
9748 if (!FLOAT128_2REG_P (mode
)
9749 && ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
9751 || (mode
!= DFmode
&& mode
!= DDmode
))
9752 && (TARGET_POWERPC64
|| mode
!= DImode
)
9753 && (mode
!= TImode
|| VECTOR_MEM_VSX_P (TImode
))
9755 && !avoiding_indexed_address_p (mode
)
9756 && legitimate_indexed_address_p (x
, reg_ok_strict
))
9758 if (TARGET_UPDATE
&& GET_CODE (x
) == PRE_MODIFY
9759 && mode_supports_pre_modify_p (mode
)
9760 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
)
9761 && (rs6000_legitimate_offset_address_p (mode
, XEXP (x
, 1),
9762 reg_ok_strict
, false)
9763 || (!avoiding_indexed_address_p (mode
)
9764 && legitimate_indexed_address_p (XEXP (x
, 1), reg_ok_strict
)))
9765 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
9767 if (reg_offset_p
&& !quad_offset_p
9768 && legitimate_lo_sum_address_p (mode
, x
, reg_ok_strict
))
9773 /* Debug version of rs6000_legitimate_address_p. */
9775 rs6000_debug_legitimate_address_p (machine_mode mode
, rtx x
,
9778 bool ret
= rs6000_legitimate_address_p (mode
, x
, reg_ok_strict
);
9780 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9781 "strict = %d, reload = %s, code = %s\n",
9782 ret
? "true" : "false",
9783 GET_MODE_NAME (mode
),
9785 (reload_completed
? "after" : "before"),
9786 GET_RTX_NAME (GET_CODE (x
)));
/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return rs6000_mode_dependent_address_ptr (addr);
}
9801 /* Go to LABEL if ADDR (a legitimate address expression)
9802 has an effect that depends on the machine mode it is used for.
9804 On the RS/6000 this is true of all integral offsets (since AltiVec
9805 and VSX modes don't allow them) or is a pre-increment or decrement.
9807 ??? Except that due to conceptual problems in offsettable_address_p
9808 we can't really report the problems of integral offsets. So leave
9809 this assuming that the adjustable offset must be valid for the
9810 sub-words of a TFmode operand, which is what we had before. */
9813 rs6000_mode_dependent_address (const_rtx addr
)
9815 switch (GET_CODE (addr
))
9818 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9819 is considered a legitimate address before reload, so there
9820 are no offset restrictions in that case. Note that this
9821 condition is safe in strict mode because any address involving
9822 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9823 been rejected as illegitimate. */
9824 if (XEXP (addr
, 0) != virtual_stack_vars_rtx
9825 && XEXP (addr
, 0) != arg_pointer_rtx
9826 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
9828 unsigned HOST_WIDE_INT val
= INTVAL (XEXP (addr
, 1));
9829 return val
+ 0x8000 >= 0x10000 - (TARGET_POWERPC64
? 8 : 12);
9834 /* Anything in the constant pool is sufficiently aligned that
9835 all bytes have the same high part address. */
9836 return !legitimate_constant_pool_address_p (addr
, QImode
, false);
9838 /* Auto-increment cases are now treated generically in recog.c. */
9840 return TARGET_UPDATE
;
9842 /* AND is only allowed in Altivec loads. */
/* Debug version of rs6000_mode_dependent_address.  */

static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
	   ret ? "true" : "false");
  debug_rtx (CONST_CAST_RTX (addr));

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
9891 /* More elaborate version of recog's offsettable_memref_p predicate
9892 that works around the ??? note of rs6000_mode_dependent_address.
9893 In particular it accepts
9895 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9897 in 32-bit mode, that the recog predicate rejects. */
9900 rs6000_offsettable_memref_p (rtx op
, machine_mode reg_mode
)
9907 /* First mimic offsettable_memref_p. */
9908 if (offsettable_address_p (true, GET_MODE (op
), XEXP (op
, 0)))
9911 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9912 the latter predicate knows nothing about the mode of the memory
9913 reference and, therefore, assumes that it is the largest supported
9914 mode (TFmode). As a consequence, legitimate offsettable memory
9915 references are rejected. rs6000_legitimate_offset_address_p contains
9916 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9917 at least with a little bit of help here given that we know the
9918 actual registers used. */
9919 worst_case
= ((TARGET_POWERPC64
&& GET_MODE_CLASS (reg_mode
) == MODE_INT
)
9920 || GET_MODE_SIZE (reg_mode
) == 4);
9921 return rs6000_legitimate_offset_address_p (GET_MODE (op
), XEXP (op
, 0),
9925 /* Determine the reassociation width to be used in reassociate_bb.
9926 This takes into account how many parallel operations we
9927 can actually do of a given type, and also the latency.
9931 vect add/sub/mul 2/cycle
9932 fp add/sub/mul 2/cycle
9937 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED
,
9942 case PROCESSOR_POWER8
:
9943 case PROCESSOR_POWER9
:
9944 if (DECIMAL_FLOAT_MODE_P (mode
))
9946 if (VECTOR_MODE_P (mode
))
9948 if (INTEGRAL_MODE_P (mode
))
9949 return opc
== MULT_EXPR
? 4 : 6;
9950 if (FLOAT_MODE_P (mode
))
9959 /* Change register usage conditional on target flags. */
9961 rs6000_conditional_register_usage (void)
9965 if (TARGET_DEBUG_TARGET
)
9966 fprintf (stderr
, "rs6000_conditional_register_usage called\n");
9968 /* Set MQ register fixed (already call_used) so that it will not be
9972 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9974 fixed_regs
[13] = call_used_regs
[13]
9975 = call_really_used_regs
[13] = 1;
9977 /* Conditionally disable FPRs. */
9978 if (TARGET_SOFT_FLOAT
)
9979 for (i
= 32; i
< 64; i
++)
9980 fixed_regs
[i
] = call_used_regs
[i
]
9981 = call_really_used_regs
[i
] = 1;
9983 /* The TOC register is not killed across calls in a way that is
9984 visible to the compiler. */
9985 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9986 call_really_used_regs
[2] = 0;
9988 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
9989 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
9991 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
9992 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9993 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9994 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
9996 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
9997 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9998 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
9999 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10001 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
)
10002 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
10003 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
10005 if (!TARGET_ALTIVEC
&& !TARGET_VSX
)
10007 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
10008 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10009 call_really_used_regs
[VRSAVE_REGNO
] = 1;
10012 if (TARGET_ALTIVEC
|| TARGET_VSX
)
10013 global_regs
[VSCR_REGNO
] = 1;
10015 if (TARGET_ALTIVEC_ABI
)
10017 for (i
= FIRST_ALTIVEC_REGNO
; i
< FIRST_ALTIVEC_REGNO
+ 20; ++i
)
10018 call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
10020 /* AIX reserves VR20:31 in non-extended ABI mode. */
10022 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
< FIRST_ALTIVEC_REGNO
+ 32; ++i
)
10023 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
/* Output insns to set DEST equal to the constant SOURCE as a series of
   lis, ori and shl instructions and return TRUE.  */

bool
rs6000_emit_set_const (rtx dest, rtx source)
{
  machine_mode mode = GET_MODE (dest);
  rtx temp, set;
  rtx_insn *insn;
  HOST_WIDE_INT c;

  gcc_checking_assert (CONST_INT_P (source));
  c = INTVAL (source);
  switch (mode)
    {
    case QImode:
    case HImode:
      emit_insn (gen_rtx_SET (dest, source));
      return true;

    case SImode:
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (copy_rtx (temp),
			      GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
      emit_insn (gen_rtx_SET (dest,
			      gen_rtx_IOR (SImode, copy_rtx (temp),
					   GEN_INT (c & 0xffff))));
      break;

    case DImode:
      if (!TARGET_POWERPC64)
	{
	  rtx hi, lo;

	  hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
				      DImode);
	  lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
				      DImode);
	  emit_move_insn (hi, GEN_INT (c >> 32));
	  c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
	  emit_move_insn (lo, GEN_INT (c));
	}
      else
	rs6000_emit_set_long_const (dest, c);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));

  return true;
}
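
/* Illustrative sketch, not part of rs6000.c: the SImode split used by
   rs6000_emit_set_const above.  A 32-bit constant is materialized as its
   high 16 bits (the lis-style SET) followed by an IOR of its low 16 bits
   (the ori-style SET), so 0x12345678 becomes 0x12340000 | 0x5678.  The
   helper name below is hypothetical and exists only for this example.  */
static inline void
example_split_si_constant (unsigned int c, unsigned int *high_part,
			   unsigned int *low_part)
{
  *high_part = c & ~0xffffu;	/* what the first SET loads */
  *low_part = c & 0xffffu;	/* what the IOR merges in */
}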
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */

static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  rtx temp;
  HOST_WIDE_INT ud1, ud2, ud3, ud4;

  ud1 = c & 0xffff;
  c = c >> 16;
  ud2 = c & 0xffff;
  c = c >> 16;
  ud3 = c & 0xffff;
  c = c >> 16;
  ud4 = c & 0xffff;

  if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
      || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
    emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

  else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	   || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else if (ud3 == 0 && ud4 == 0)
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      gcc_assert (ud2 & 0x8000);
      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
      emit_move_insn (dest,
		      gen_rtx_ZERO_EXTEND (DImode,
					   gen_lowpart (SImode,
							copy_rtx (temp))));
    }
  else if ((ud4 == 0xffff && (ud3 & 0x8000))
	   || (ud4 == 0 && ! (ud3 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
      if (ud2 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2)));
      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
      if (ud3 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud3)));

      emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (32)));
      if (ud2 != 0)
	emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2 << 16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
}
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
rtvec
rs6000_const_vec (machine_mode mode)
{
  int i, subparts;
  rtvec v;

  switch (mode)
    {
    case V1TImode:
      subparts = 1;
      break;
    case V2DFmode:
    case V2DImode:
      subparts = 2;
      break;
    case V4SFmode:
    case V4SImode:
      subparts = 4;
      break;
    case V8HImode:
      subparts = 8;
      break;
    case V16QImode:
      subparts = 16;
      break;
    default:
      gcc_unreachable ();
    }

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
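
/* Illustrative sketch, not part of rs6000.c: the selector built by
   rs6000_const_vec above.  For a vector of SUBPARTS elements it is
   { SUBPARTS/2, ..., SUBPARTS-1, 0, ..., SUBPARTS/2-1 }, i.e. the two
   halves of the vector swap places; for a 4-element vector that is
   { 2, 3, 0, 1 }.  The helper name is hypothetical.  */
static inline void
example_half_swap_selector (int subparts, int *sel)
{
  int i;

  for (i = 0; i < subparts; i++)
    sel[i] = (i + subparts / 2) % subparts;
}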
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store operation.  */
static void
rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     scalar.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
						  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insns that represent an lxvd2x and xxpermdi.  */
static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
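
/* Why the load above uses two permutes: on a little-endian target the
   lxvd2x-style permuting load leaves the two doublewords of the vector in
   swapped order, and the following xxpermdi-style permute swaps them back,
   so the pair behaves as an ordinary element-order load.  A minimal sketch
   of that composition follows (hypothetical helper, not part of rs6000.c).  */
static inline void
example_double_half_swap_is_identity (const long long in[2], long long out[2])
{
  long long tmp[2];

  /* First half-swap: what the permuting load leaves in the register.  */
  tmp[0] = in[1];
  tmp[1] = in[0];

  /* Second half-swap: the register permute; original order is restored.  */
  out[0] = tmp[1];
  out[1] = tmp[0];
}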
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insns that represent an xxpermdi and an stxvd2x.  */
static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */
void
rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
	      && VECTOR_MEM_VSX_P (mode)
	      && !TARGET_P9_VECTOR
	      && !gpr_or_gpr_p (dest, source)
	      && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
	source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

	(SUBREG:SF (REG:SI ...))
	(SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

bool
valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

	(SUBREG:SF (REG:SI)) and
	(SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to
   convert the bits before we can use a direct move or operate on the bits
   in the vector register as an integer type.

   Skip things like (set (SUBREG:SI (...) (SUBREG:SI (...)).  */

static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
	{
	  emit_insn (gen_movsi_from_sf (dest, inner_source));
	  return true;
	}

      if (mode == SFmode && inner_mode == SImode)
	{
	  emit_insn (gen_movsf_from_si (dest, inner_source));
	  return true;
	}
    }

  return false;
}
10416 /* Emit a move from SOURCE to DEST in mode MODE. */
10418 rs6000_emit_move (rtx dest
, rtx source
, machine_mode mode
)
10421 operands
[0] = dest
;
10422 operands
[1] = source
;
10424 if (TARGET_DEBUG_ADDR
)
10427 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10428 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10429 GET_MODE_NAME (mode
),
10432 can_create_pseudo_p ());
10434 fprintf (stderr
, "source:\n");
10435 debug_rtx (source
);
10438 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10439 if (CONST_WIDE_INT_P (operands
[1])
10440 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
10442 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10443 gcc_unreachable ();
10446 /* See if we need to special case SImode/SFmode SUBREG moves. */
10447 if ((mode
== SImode
|| mode
== SFmode
) && SUBREG_P (source
)
10448 && rs6000_emit_move_si_sf_subreg (dest
, source
, mode
))
10451 /* Check if GCC is setting up a block move that will end up using FP
10452 registers as temporaries. We must make sure this is acceptable. */
10453 if (GET_CODE (operands
[0]) == MEM
10454 && GET_CODE (operands
[1]) == MEM
10456 && (SLOW_UNALIGNED_ACCESS (DImode
, MEM_ALIGN (operands
[0]))
10457 || SLOW_UNALIGNED_ACCESS (DImode
, MEM_ALIGN (operands
[1])))
10458 && ! (SLOW_UNALIGNED_ACCESS (SImode
, (MEM_ALIGN (operands
[0]) > 32
10459 ? 32 : MEM_ALIGN (operands
[0])))
10460 || SLOW_UNALIGNED_ACCESS (SImode
, (MEM_ALIGN (operands
[1]) > 32
10462 : MEM_ALIGN (operands
[1]))))
10463 && ! MEM_VOLATILE_P (operands
[0])
10464 && ! MEM_VOLATILE_P (operands
[1]))
10466 emit_move_insn (adjust_address (operands
[0], SImode
, 0),
10467 adjust_address (operands
[1], SImode
, 0));
10468 emit_move_insn (adjust_address (copy_rtx (operands
[0]), SImode
, 4),
10469 adjust_address (copy_rtx (operands
[1]), SImode
, 4));
10473 if (can_create_pseudo_p () && GET_CODE (operands
[0]) == MEM
10474 && !gpc_reg_operand (operands
[1], mode
))
10475 operands
[1] = force_reg (mode
, operands
[1]);
10477 /* Recognize the case where operand[1] is a reference to thread-local
10478 data and load its address to a register. */
10479 if (tls_referenced_p (operands
[1]))
10481 enum tls_model model
;
10482 rtx tmp
= operands
[1];
10485 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
10487 addend
= XEXP (XEXP (tmp
, 0), 1);
10488 tmp
= XEXP (XEXP (tmp
, 0), 0);
10491 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
10492 model
= SYMBOL_REF_TLS_MODEL (tmp
);
10493 gcc_assert (model
!= 0);
10495 tmp
= rs6000_legitimize_tls_address (tmp
, model
);
10498 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
10499 tmp
= force_operand (tmp
, operands
[0]);
10504 /* 128-bit constant floating-point values on Darwin should really be loaded
10505 as two parts. However, this premature splitting is a problem when DFmode
10506 values can go into Altivec registers. */
10507 if (FLOAT128_IBM_P (mode
) && !reg_addr
[DFmode
].scalar_in_vmx_p
10508 && GET_CODE (operands
[1]) == CONST_DOUBLE
)
10510 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
, 0),
10511 simplify_gen_subreg (DFmode
, operands
[1], mode
, 0),
10513 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
,
10514 GET_MODE_SIZE (DFmode
)),
10515 simplify_gen_subreg (DFmode
, operands
[1], mode
,
10516 GET_MODE_SIZE (DFmode
)),
10521 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10522 p1:SD) if p1 is not of floating point class and p0 is spilled as
10523 we can have no analogous movsd_store for this. */
10524 if (lra_in_progress
&& mode
== DDmode
10525 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
10526 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
10527 && GET_CODE (operands
[1]) == SUBREG
&& REG_P (SUBREG_REG (operands
[1]))
10528 && GET_MODE (SUBREG_REG (operands
[1])) == SDmode
)
10531 int regno
= REGNO (SUBREG_REG (operands
[1]));
10533 if (regno
>= FIRST_PSEUDO_REGISTER
)
10535 cl
= reg_preferred_class (regno
);
10536 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][1];
10538 if (regno
>= 0 && ! FP_REGNO_P (regno
))
10541 operands
[0] = gen_lowpart_SUBREG (SDmode
, operands
[0]);
10542 operands
[1] = SUBREG_REG (operands
[1]);
10545 if (lra_in_progress
10547 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
10548 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
10549 && (REG_P (operands
[1])
10550 || (GET_CODE (operands
[1]) == SUBREG
10551 && REG_P (SUBREG_REG (operands
[1])))))
10553 int regno
= REGNO (GET_CODE (operands
[1]) == SUBREG
10554 ? SUBREG_REG (operands
[1]) : operands
[1]);
10557 if (regno
>= FIRST_PSEUDO_REGISTER
)
10559 cl
= reg_preferred_class (regno
);
10560 gcc_assert (cl
!= NO_REGS
);
10561 regno
= ira_class_hard_regs
[cl
][0];
10563 if (FP_REGNO_P (regno
))
10565 if (GET_MODE (operands
[0]) != DDmode
)
10566 operands
[0] = gen_rtx_SUBREG (DDmode
, operands
[0], 0);
10567 emit_insn (gen_movsd_store (operands
[0], operands
[1]));
10569 else if (INT_REGNO_P (regno
))
10570 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
10575 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10576 p:DD)) if p0 is not of floating point class and p1 is spilled as
10577 we can have no analogous movsd_load for this. */
10578 if (lra_in_progress
&& mode
== DDmode
10579 && GET_CODE (operands
[0]) == SUBREG
&& REG_P (SUBREG_REG (operands
[0]))
10580 && GET_MODE (SUBREG_REG (operands
[0])) == SDmode
10581 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
10582 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
10585 int regno
= REGNO (SUBREG_REG (operands
[0]));
10587 if (regno
>= FIRST_PSEUDO_REGISTER
)
10589 cl
= reg_preferred_class (regno
);
10590 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][0];
10592 if (regno
>= 0 && ! FP_REGNO_P (regno
))
10595 operands
[0] = SUBREG_REG (operands
[0]);
10596 operands
[1] = gen_lowpart_SUBREG (SDmode
, operands
[1]);
10599 if (lra_in_progress
10601 && (REG_P (operands
[0])
10602 || (GET_CODE (operands
[0]) == SUBREG
10603 && REG_P (SUBREG_REG (operands
[0]))))
10604 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
10605 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
10607 int regno
= REGNO (GET_CODE (operands
[0]) == SUBREG
10608 ? SUBREG_REG (operands
[0]) : operands
[0]);
10611 if (regno
>= FIRST_PSEUDO_REGISTER
)
10613 cl
= reg_preferred_class (regno
);
10614 gcc_assert (cl
!= NO_REGS
);
10615 regno
= ira_class_hard_regs
[cl
][0];
10617 if (FP_REGNO_P (regno
))
10619 if (GET_MODE (operands
[1]) != DDmode
)
10620 operands
[1] = gen_rtx_SUBREG (DDmode
, operands
[1], 0);
10621 emit_insn (gen_movsd_load (operands
[0], operands
[1]));
10623 else if (INT_REGNO_P (regno
))
10624 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
10630 /* FIXME: In the long term, this switch statement should go away
10631 and be replaced by a sequence of tests based on things like
10637 if (CONSTANT_P (operands
[1])
10638 && GET_CODE (operands
[1]) != CONST_INT
)
10639 operands
[1] = force_const_mem (mode
, operands
[1]);
10646 if (FLOAT128_2REG_P (mode
))
10647 rs6000_eliminate_indexed_memrefs (operands
);
10654 if (CONSTANT_P (operands
[1])
10655 && ! easy_fp_constant (operands
[1], mode
))
10656 operands
[1] = force_const_mem (mode
, operands
[1]);
10668 if (CONSTANT_P (operands
[1])
10669 && !easy_vector_constant (operands
[1], mode
))
10670 operands
[1] = force_const_mem (mode
, operands
[1]);
10675 /* Use default pattern for address of ELF small data */
10678 && DEFAULT_ABI
== ABI_V4
10679 && (GET_CODE (operands
[1]) == SYMBOL_REF
10680 || GET_CODE (operands
[1]) == CONST
)
10681 && small_data_operand (operands
[1], mode
))
10683 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10687 if (DEFAULT_ABI
== ABI_V4
10688 && mode
== Pmode
&& mode
== SImode
10689 && flag_pic
== 1 && got_operand (operands
[1], mode
))
10691 emit_insn (gen_movsi_got (operands
[0], operands
[1]));
10695 if ((TARGET_ELF
|| DEFAULT_ABI
== ABI_DARWIN
)
10699 && CONSTANT_P (operands
[1])
10700 && GET_CODE (operands
[1]) != HIGH
10701 && GET_CODE (operands
[1]) != CONST_INT
)
10703 rtx target
= (!can_create_pseudo_p ()
10705 : gen_reg_rtx (mode
));
10707 /* If this is a function address on -mcall-aixdesc,
10708 convert it to the address of the descriptor. */
10709 if (DEFAULT_ABI
== ABI_AIX
10710 && GET_CODE (operands
[1]) == SYMBOL_REF
10711 && XSTR (operands
[1], 0)[0] == '.')
10713 const char *name
= XSTR (operands
[1], 0);
10715 while (*name
== '.')
10717 new_ref
= gen_rtx_SYMBOL_REF (Pmode
, name
);
10718 CONSTANT_POOL_ADDRESS_P (new_ref
)
10719 = CONSTANT_POOL_ADDRESS_P (operands
[1]);
10720 SYMBOL_REF_FLAGS (new_ref
) = SYMBOL_REF_FLAGS (operands
[1]);
10721 SYMBOL_REF_USED (new_ref
) = SYMBOL_REF_USED (operands
[1]);
10722 SYMBOL_REF_DATA (new_ref
) = SYMBOL_REF_DATA (operands
[1]);
10723 operands
[1] = new_ref
;
10726 if (DEFAULT_ABI
== ABI_DARWIN
)
10729 if (MACHO_DYNAMIC_NO_PIC_P
)
10731 /* Take care of any required data indirection. */
10732 operands
[1] = rs6000_machopic_legitimize_pic_address (
10733 operands
[1], mode
, operands
[0]);
10734 if (operands
[0] != operands
[1])
10735 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10739 emit_insn (gen_macho_high (target
, operands
[1]));
10740 emit_insn (gen_macho_low (operands
[0], target
, operands
[1]));
10744 emit_insn (gen_elf_high (target
, operands
[1]));
10745 emit_insn (gen_elf_low (operands
[0], target
, operands
[1]));
10749 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10750 and we have put it in the TOC, we just need to make a TOC-relative
10751 reference to it. */
10753 && GET_CODE (operands
[1]) == SYMBOL_REF
10754 && use_toc_relative_ref (operands
[1], mode
))
10755 operands
[1] = create_TOC_reference (operands
[1], operands
[0]);
10756 else if (mode
== Pmode
10757 && CONSTANT_P (operands
[1])
10758 && GET_CODE (operands
[1]) != HIGH
10759 && ((GET_CODE (operands
[1]) != CONST_INT
10760 && ! easy_fp_constant (operands
[1], mode
))
10761 || (GET_CODE (operands
[1]) == CONST_INT
10762 && (num_insns_constant (operands
[1], mode
)
10763 > (TARGET_CMODEL
!= CMODEL_SMALL
? 3 : 2)))
10764 || (GET_CODE (operands
[0]) == REG
10765 && FP_REGNO_P (REGNO (operands
[0]))))
10766 && !toc_relative_expr_p (operands
[1], false, NULL
, NULL
)
10767 && (TARGET_CMODEL
== CMODEL_SMALL
10768 || can_create_pseudo_p ()
10769 || (REG_P (operands
[0])
10770 && INT_REG_OK_FOR_BASE_P (operands
[0], true))))
10774 /* Darwin uses a special PIC legitimizer. */
10775 if (DEFAULT_ABI
== ABI_DARWIN
&& MACHOPIC_INDIRECT
)
10778 rs6000_machopic_legitimize_pic_address (operands
[1], mode
,
10780 if (operands
[0] != operands
[1])
10781 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
10786 /* If we are to limit the number of things we put in the TOC and
10787 this is a symbol plus a constant we can add in one insn,
10788 just put the symbol in the TOC and add the constant. */
10789 if (GET_CODE (operands
[1]) == CONST
10790 && TARGET_NO_SUM_IN_TOC
10791 && GET_CODE (XEXP (operands
[1], 0)) == PLUS
10792 && add_operand (XEXP (XEXP (operands
[1], 0), 1), mode
)
10793 && (GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == LABEL_REF
10794 || GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == SYMBOL_REF
)
10795 && ! side_effects_p (operands
[0]))
10798 force_const_mem (mode
, XEXP (XEXP (operands
[1], 0), 0));
10799 rtx other
= XEXP (XEXP (operands
[1], 0), 1);
10801 sym
= force_reg (mode
, sym
);
10802 emit_insn (gen_add3_insn (operands
[0], sym
, other
));
10806 operands
[1] = force_const_mem (mode
, operands
[1]);
10809 && GET_CODE (XEXP (operands
[1], 0)) == SYMBOL_REF
10810 && use_toc_relative_ref (XEXP (operands
[1], 0), mode
))
10812 rtx tocref
= create_TOC_reference (XEXP (operands
[1], 0),
10814 operands
[1] = gen_const_mem (mode
, tocref
);
10815 set_mem_alias_set (operands
[1], get_TOC_alias_set ());
10821 if (!VECTOR_MEM_VSX_P (TImode
))
10822 rs6000_eliminate_indexed_memrefs (operands
);
10826 rs6000_eliminate_indexed_memrefs (operands
);
10830 fatal_insn ("bad move", gen_rtx_SET (dest
, source
));
10833 /* Above, we may have called force_const_mem which may have returned
10834 an invalid address. If we can, fix this up; otherwise, reload will
10835 have to deal with it. */
10836 if (GET_CODE (operands
[1]) == MEM
)
10837 operands
[1] = validize_mem (operands
[1]);
10839 emit_insn (gen_rtx_SET (operands
[0], operands
[1]));
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE)	\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
10855 /* Walk down the type tree of TYPE counting consecutive base elements.
10856 If *MODEP is VOIDmode, then set it to the first valid floating point
10857 or vector type. If a non-floating point or vector type is found, or
10858 if a floating point or vector type that doesn't match a non-VOIDmode
10859 *MODEP is found, then return -1, otherwise return the count in the
10863 rs6000_aggregate_candidate (const_tree type
, machine_mode
*modep
)
10866 HOST_WIDE_INT size
;
10868 switch (TREE_CODE (type
))
10871 mode
= TYPE_MODE (type
);
10872 if (!SCALAR_FLOAT_MODE_P (mode
))
10875 if (*modep
== VOIDmode
)
10878 if (*modep
== mode
)
10884 mode
= TYPE_MODE (TREE_TYPE (type
));
10885 if (!SCALAR_FLOAT_MODE_P (mode
))
10888 if (*modep
== VOIDmode
)
10891 if (*modep
== mode
)
10897 if (!TARGET_ALTIVEC_ABI
|| !TARGET_ALTIVEC
)
10900 /* Use V4SImode as representative of all 128-bit vector types. */
10901 size
= int_size_in_bytes (type
);
10911 if (*modep
== VOIDmode
)
10914 /* Vector modes are considered to be opaque: two vectors are
10915 equivalent for the purposes of being homogeneous aggregates
10916 if they are the same size. */
10917 if (*modep
== mode
)
10925 tree index
= TYPE_DOMAIN (type
);
10927 /* Can't handle incomplete types nor sizes that are not
10929 if (!COMPLETE_TYPE_P (type
)
10930 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10933 count
= rs6000_aggregate_candidate (TREE_TYPE (type
), modep
);
10936 || !TYPE_MAX_VALUE (index
)
10937 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
10938 || !TYPE_MIN_VALUE (index
)
10939 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
10943 count
*= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
10944 - tree_to_uhwi (TYPE_MIN_VALUE (index
)));
10946 /* There must be no padding. */
10947 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
10959 /* Can't handle incomplete types nor sizes that are not
10961 if (!COMPLETE_TYPE_P (type
)
10962 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10965 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
10967 if (TREE_CODE (field
) != FIELD_DECL
)
10970 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
10973 count
+= sub_count
;
10976 /* There must be no padding. */
10977 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
10984 case QUAL_UNION_TYPE
:
10986 /* These aren't very interesting except in a degenerate case. */
10991 /* Can't handle incomplete types nor sizes that are not
10993 if (!COMPLETE_TYPE_P (type
)
10994 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
10997 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
10999 if (TREE_CODE (field
) != FIELD_DECL
)
11002 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
11005 count
= count
> sub_count
? count
: sub_count
;
11008 /* There must be no padding. */
11009 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
				       machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      *elt_mode = field_mode;
	      *n_elts = field_count;
	      return true;
	    }
	}
    }

  *elt_mode = mode;
  *n_elts = 1;
  return false;
}
11068 /* Return a nonzero value to say to return the function value in
11069 memory, just as large structures are always returned. TYPE will be
11070 the data type of the value, and FNTYPE will be the type of the
11071 function doing the returning, or @code{NULL} for libcalls.
11073 The AIX ABI for the RS/6000 specifies that all structures are
11074 returned in memory. The Darwin ABI does the same.
11076 For the Darwin 64 Bit ABI, a function result can be returned in
11077 registers or in memory, depending on the size of the return data
11078 type. If it is returned in registers, the value occupies the same
11079 registers as it would if it were the first and only function
11080 argument. Otherwise, the function places its result in memory at
11081 the location pointed to by GPR3.
11083 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11084 but a draft put them in memory, and GCC used to implement the draft
11085 instead of the final standard. Therefore, aix_struct_return
11086 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11087 compatibility can change DRAFT_V4_STRUCT_RET to override the
11088 default, and -m switches get the final word. See
11089 rs6000_option_override_internal for more details.
11091 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11092 long double support is enabled. These values are returned in memory.
11094 int_size_in_bytes returns -1 for variable size objects, which go in
11095 memory always. The cast to unsigned makes -1 > 8. */
11098 rs6000_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
11100 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11102 && rs6000_darwin64_abi
11103 && TREE_CODE (type
) == RECORD_TYPE
11104 && int_size_in_bytes (type
) > 0)
11106 CUMULATIVE_ARGS valcum
;
11110 valcum
.fregno
= FP_ARG_MIN_REG
;
11111 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
11112 /* Do a trial code generation as if this were going to be passed
11113 as an argument; if any part goes in memory, we return NULL. */
11114 valret
= rs6000_darwin64_record_arg (&valcum
, type
, true, true);
11117 /* Otherwise fall through to more conventional ABI rules. */
11120 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
11121 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type
), type
,
11125 /* The ELFv2 ABI returns aggregates up to 16B in registers */
11126 if (DEFAULT_ABI
== ABI_ELFv2
&& AGGREGATE_TYPE_P (type
)
11127 && (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) <= 16)
11130 if (AGGREGATE_TYPE_P (type
)
11131 && (aix_struct_return
11132 || (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) > 8))
11135 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11136 modes only exist for GCC vector types if -maltivec. */
11137 if (TARGET_32BIT
&& !TARGET_ALTIVEC_ABI
11138 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type
)))
11141 /* Return synthetic vectors in memory. */
11142 if (TREE_CODE (type
) == VECTOR_TYPE
11143 && int_size_in_bytes (type
) > (TARGET_ALTIVEC_ABI
? 16 : 8))
11145 static bool warned_for_return_big_vectors
= false;
11146 if (!warned_for_return_big_vectors
)
11148 warning (OPT_Wpsabi
, "GCC vector returned by reference: "
11149 "non-standard ABI extension with no compatibility "
11151 warned_for_return_big_vectors
= true;
11156 if (DEFAULT_ABI
== ABI_V4
&& TARGET_IEEEQUAD
11157 && FLOAT128_IEEE_P (TYPE_MODE (type
)))
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }
  return false;
}
#endif
11204 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11205 for a call to a function whose data type is FNTYPE.
11206 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11208 For incoming args we set the number of arguments in the prototype large
11209 so we never return a PARALLEL. */
11212 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
11213 rtx libname ATTRIBUTE_UNUSED
, int incoming
,
11214 int libcall
, int n_named_args
,
11215 tree fndecl ATTRIBUTE_UNUSED
,
11216 machine_mode return_mode ATTRIBUTE_UNUSED
)
11218 static CUMULATIVE_ARGS zero_cumulative
;
11220 *cum
= zero_cumulative
;
11222 cum
->fregno
= FP_ARG_MIN_REG
;
11223 cum
->vregno
= ALTIVEC_ARG_MIN_REG
;
11224 cum
->prototype
= (fntype
&& prototype_p (fntype
));
11225 cum
->call_cookie
= ((DEFAULT_ABI
== ABI_V4
&& libcall
)
11226 ? CALL_LIBCALL
: CALL_NORMAL
);
11227 cum
->sysv_gregno
= GP_ARG_MIN_REG
;
11228 cum
->stdarg
= stdarg_p (fntype
);
11229 cum
->libcall
= libcall
;
11231 cum
->nargs_prototype
= 0;
11232 if (incoming
|| cum
->prototype
)
11233 cum
->nargs_prototype
= n_named_args
;
11235 /* Check for a longcall attribute. */
11236 if ((!fntype
&& rs6000_default_long_calls
)
11238 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype
))
11239 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype
))))
11240 cum
->call_cookie
|= CALL_LONG
;
11242 if (TARGET_DEBUG_ARG
)
11244 fprintf (stderr
, "\ninit_cumulative_args:");
11247 tree ret_type
= TREE_TYPE (fntype
);
11248 fprintf (stderr
, " ret code = %s,",
11249 get_tree_code_name (TREE_CODE (ret_type
)));
11252 if (cum
->call_cookie
& CALL_LONG
)
11253 fprintf (stderr
, " longcall,");
11255 fprintf (stderr
, " proto = %d, nargs = %d\n",
11256 cum
->prototype
, cum
->nargs_prototype
);
11259 #ifdef HAVE_AS_GNU_ATTRIBUTE
11260 if (TARGET_ELF
&& (TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
))
11262 cum
->escapes
= call_ABI_of_interest (fndecl
);
11269 return_type
= TREE_TYPE (fntype
);
11270 return_mode
= TYPE_MODE (return_type
);
11273 return_type
= lang_hooks
.types
.type_for_mode (return_mode
, 0);
11275 if (return_type
!= NULL
)
11277 if (TREE_CODE (return_type
) == RECORD_TYPE
11278 && TYPE_TRANSPARENT_AGGR (return_type
))
11280 return_type
= TREE_TYPE (first_field (return_type
));
11281 return_mode
= TYPE_MODE (return_type
);
11283 if (AGGREGATE_TYPE_P (return_type
)
11284 && ((unsigned HOST_WIDE_INT
) int_size_in_bytes (return_type
)
11286 rs6000_returns_struct
= true;
11288 if (SCALAR_FLOAT_MODE_P (return_mode
))
11290 rs6000_passes_float
= true;
11291 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
)
11292 && (FLOAT128_IBM_P (return_mode
)
11293 || FLOAT128_IEEE_P (return_mode
)
11294 || (return_type
!= NULL
11295 && (TYPE_MAIN_VARIANT (return_type
)
11296 == long_double_type_node
))))
11297 rs6000_passes_long_double
= true;
11299 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode
)
11300 || PAIRED_VECTOR_MODE (return_mode
))
11301 rs6000_passes_vector
= true;
11308 && TARGET_ALTIVEC_ABI
11309 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype
))))
11311 error ("cannot return value in vector register because"
11312 " altivec instructions are disabled, use %qs"
11313 " to enable them", "-maltivec");
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static machine_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}
/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}
/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			      machine_mode mode,
			      int *punsignedp ATTRIBUTE_UNUSED,
			      const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
static bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
}
/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (TARGET_SINGLE_FLOAT && mode == SFmode)
    return true;
  if (TARGET_DOUBLE_FLOAT && mode == DFmode)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}
11390 /* If defined, a C expression which determines whether, and in which
11391 direction, to pad out an argument with extra space. The value
11392 should be of type `enum direction': either `upward' to pad above
11393 the argument, `downward' to pad below, or `none' to inhibit
11396 For the AIX ABI structs are always stored left shifted in their
11400 function_arg_padding (machine_mode mode
, const_tree type
)
11402 #ifndef AGGREGATE_PADDING_FIXED
11403 #define AGGREGATE_PADDING_FIXED 0
11405 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11406 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11409 if (!AGGREGATE_PADDING_FIXED
)
11411 /* GCC used to pass structures of the same size as integer types as
11412 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
11413 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11414 passed padded downward, except that -mstrict-align further
11415 muddied the water in that multi-component structures of 2 and 4
11416 bytes in size were passed padded upward.
11418 The following arranges for best compatibility with previous
11419 versions of gcc, but removes the -mstrict-align dependency. */
11420 if (BYTES_BIG_ENDIAN
)
11422 HOST_WIDE_INT size
= 0;
11424 if (mode
== BLKmode
)
11426 if (type
&& TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
)
11427 size
= int_size_in_bytes (type
);
11430 size
= GET_MODE_SIZE (mode
);
11432 if (size
== 1 || size
== 2 || size
== 4)
11438 if (AGGREGATES_PAD_UPWARD_ALWAYS
)
11440 if (type
!= 0 && AGGREGATE_TYPE_P (type
))
11444 /* Fall back to the default. */
11445 return DEFAULT_FUNCTION_ARG_PADDING (mode
, type
);
11448 /* If defined, a C expression that gives the alignment boundary, in bits,
11449 of an argument with the specified mode and type. If it is not defined,
11450 PARM_BOUNDARY is used for all arguments.
11452 V.4 wants long longs and doubles to be double word aligned. Just
11453 testing the mode size is a boneheaded way to do this as it means
11454 that other types such as complex int are also double word aligned.
11455 However, we're stuck with this because changing the ABI might break
11456 existing library interfaces.
11458 Quadword align Altivec/VSX vectors.
11459 Quadword align large synthetic vector types. */
11461 static unsigned int
11462 rs6000_function_arg_boundary (machine_mode mode
, const_tree type
)
11464 machine_mode elt_mode
;
11467 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
11469 if (DEFAULT_ABI
== ABI_V4
11470 && (GET_MODE_SIZE (mode
) == 8
11471 || (TARGET_HARD_FLOAT
11472 && !is_complex_IBM_long_double (mode
)
11473 && FLOAT128_2REG_P (mode
))))
11475 else if (FLOAT128_VECTOR_P (mode
))
11477 else if (PAIRED_VECTOR_MODE (mode
)
11478 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11479 && int_size_in_bytes (type
) >= 8
11480 && int_size_in_bytes (type
) < 16))
11482 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
11483 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11484 && int_size_in_bytes (type
) >= 16))
11487 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11488 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11489 -mcompat-align-parm is used. */
11490 if (((DEFAULT_ABI
== ABI_AIX
&& !rs6000_compat_align_parm
)
11491 || DEFAULT_ABI
== ABI_ELFv2
)
11492 && type
&& TYPE_ALIGN (type
) > 64)
11494 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11495 or homogeneous float/vector aggregates here. We already handled
11496 vector aggregates above, but still need to check for float here. */
11497 bool aggregate_p
= (AGGREGATE_TYPE_P (type
)
11498 && !SCALAR_FLOAT_MODE_P (elt_mode
));
11500 /* We used to check for BLKmode instead of the above aggregate type
11501 check. Warn when this results in any difference to the ABI. */
11502 if (aggregate_p
!= (mode
== BLKmode
))
11504 static bool warned
;
11505 if (!warned
&& warn_psabi
)
11508 inform (input_location
,
11509 "the ABI of passing aggregates with %d-byte alignment"
11510 " has changed in GCC 5",
11511 (int) TYPE_ALIGN (type
) / BITS_PER_UNIT
);
11519 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11520 implement the "aggregate type" check as a BLKmode check here; this
11521 means certain aggregate types are in fact not aligned. */
11522 if (TARGET_MACHO
&& rs6000_darwin64_abi
11524 && type
&& TYPE_ALIGN (type
) > 64)
11527 return PARM_BOUNDARY
;
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
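
/* Illustrative sketch, not part of rs6000.c: the alignment arithmetic in
   rs6000_parm_start.  ALIGN_MASK is the boundary in words minus one, and
   (-(abi_offset + nwords) & align_mask) is the padding needed so that the
   argument's position, counted from the start of the frame (the ABI's fixed
   offset plus the words already used), lands on the boundary.  For example,
   with a 2-word ABI offset, 3 words already used and a doubleword (2-word)
   boundary, the padding is 1, so the argument starts at word 4 of the
   parameter area.  The helper name is hypothetical.  */
static inline unsigned int
example_parm_start (unsigned int abi_offset_words, unsigned int nwords,
		    unsigned int align_mask)
{
  return nwords + (-(abi_offset_words + nwords) & align_mask);
}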
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
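
/* Illustrative sketch, not part of rs6000.c: rs6000_arg_size rounds the
   byte size of an argument up to whole parameter words, 4 bytes per word
   for 32-bit and 8 bytes per word for 64-bit targets.  A 10-byte argument
   therefore occupies 3 words on a 32-bit target and 2 words on a 64-bit
   target.  The helper name is hypothetical.  */
static inline unsigned long
example_arg_size_in_words (unsigned long size_in_bytes,
			   unsigned int bytes_per_word)
{
  return (size_in_bytes + bytes_per_word - 1) / bytes_per_word;
}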
11571 /* Use this to flush pending int fields. */
11574 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS
*cum
,
11575 HOST_WIDE_INT bitpos
, int final
)
11577 unsigned int startbit
, endbit
;
11578 int intregs
, intoffset
;
11581 /* Handle the situations where a float is taking up the first half
11582 of the GPR, and the other half is empty (typically due to
11583 alignment restrictions). We can detect this by a 8-byte-aligned
11584 int field, or by seeing that this is the final flush for this
11585 argument. Count the word and continue on. */
11586 if (cum
->floats_in_gpr
== 1
11587 && (cum
->intoffset
% 64 == 0
11588 || (cum
->intoffset
== -1 && final
)))
11591 cum
->floats_in_gpr
= 0;
11594 if (cum
->intoffset
== -1)
11597 intoffset
= cum
->intoffset
;
11598 cum
->intoffset
= -1;
11599 cum
->floats_in_gpr
= 0;
11601 if (intoffset
% BITS_PER_WORD
!= 0)
11603 mode
= mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
11605 if (mode
== BLKmode
)
11607 /* We couldn't find an appropriate mode, which happens,
11608 e.g., in packed structs when there are 3 bytes to load.
11609 Back intoffset back to the beginning of the word in this
11611 intoffset
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11615 startbit
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11616 endbit
= ROUND_UP (bitpos
, BITS_PER_WORD
);
11617 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
11618 cum
->words
+= intregs
;
11619 /* words should be unsigned. */
11620 if ((unsigned)cum
->words
< (endbit
/BITS_PER_WORD
))
11622 int pad
= (endbit
/BITS_PER_WORD
) - cum
->words
;
11627 /* The darwin64 ABI calls for us to recurse down through structs,
11628 looking for elements passed in registers. Unfortunately, we have
11629 to track int register count here also because of misalignments
11630 in powerpc alignment mode. */
11633 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS
*cum
,
11635 HOST_WIDE_INT startbitpos
)
11639 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
11640 if (TREE_CODE (f
) == FIELD_DECL
)
11642 HOST_WIDE_INT bitpos
= startbitpos
;
11643 tree ftype
= TREE_TYPE (f
);
11645 if (ftype
== error_mark_node
)
11647 mode
= TYPE_MODE (ftype
);
11649 if (DECL_SIZE (f
) != 0
11650 && tree_fits_uhwi_p (bit_position (f
)))
11651 bitpos
+= int_bit_position (f
);
11653 /* ??? FIXME: else assume zero offset. */
11655 if (TREE_CODE (ftype
) == RECORD_TYPE
)
11656 rs6000_darwin64_record_arg_advance_recurse (cum
, ftype
, bitpos
);
11657 else if (USE_FP_FOR_ARG_P (cum
, mode
))
11659 unsigned n_fpregs
= (GET_MODE_SIZE (mode
) + 7) >> 3;
11660 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
11661 cum
->fregno
+= n_fpregs
;
11662 /* Single-precision floats present a special problem for
11663 us, because they are smaller than an 8-byte GPR, and so
11664 the structure-packing rules combined with the standard
11665 varargs behavior mean that we want to pack float/float
11666 and float/int combinations into a single register's
11667 space. This is complicated by the arg advance flushing,
11668 which works on arbitrarily large groups of int-type
11670 if (mode
== SFmode
)
11672 if (cum
->floats_in_gpr
== 1)
11674 /* Two floats in a word; count the word and reset
11675 the float count. */
11677 cum
->floats_in_gpr
= 0;
11679 else if (bitpos
% 64 == 0)
	      /* A float at the beginning of an 8-byte word;
		 count it and put off adjusting cum->words until
		 we see if an arg advance flush is going to do it
		 for us.  */
11685 cum
->floats_in_gpr
++;
11689 /* The float is at the end of a word, preceded
11690 by integer fields, so the arg advance flush
11691 just above has already set cum->words and
11692 everything is taken care of. */
11696 cum
->words
+= n_fpregs
;
11698 else if (USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
11700 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
11704 else if (cum
->intoffset
== -1)
11705 cum
->intoffset
= bitpos
;
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	  || (type && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
11723 /* Update the data in CUM to advance over an argument
11724 of mode MODE and data type TYPE.
11725 (TYPE is null for libcalls where that information may not be available.)
11727 Note that for args passed by reference, function_arg will be called
11728 with MODE and TYPE set to that of the pointer to the arg, not the arg
11732 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS
*cum
, machine_mode mode
,
11733 const_tree type
, bool named
, int depth
)
11735 machine_mode elt_mode
;
11738 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
11740 /* Only tick off an argument if we're not recursing. */
11742 cum
->nargs_prototype
--;
11744 #ifdef HAVE_AS_GNU_ATTRIBUTE
11745 if (TARGET_ELF
&& (TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
11748 if (SCALAR_FLOAT_MODE_P (mode
))
11750 rs6000_passes_float
= true;
11751 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
|| TARGET_64BIT
)
11752 && (FLOAT128_IBM_P (mode
)
11753 || FLOAT128_IEEE_P (mode
)
11755 && TYPE_MAIN_VARIANT (type
) == long_double_type_node
)))
11756 rs6000_passes_long_double
= true;
11758 if ((named
&& ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
11759 || (PAIRED_VECTOR_MODE (mode
)
11761 && cum
->sysv_gregno
<= GP_ARG_MAX_REG
))
11762 rs6000_passes_vector
= true;
11766 if (TARGET_ALTIVEC_ABI
11767 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
11768 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
11769 && int_size_in_bytes (type
) == 16)))
11771 bool stack
= false;
11773 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
11775 cum
->vregno
+= n_elts
;
11777 if (!TARGET_ALTIVEC
)
11778 error ("cannot pass argument in vector register because"
11779 " altivec instructions are disabled, use %qs"
11780 " to enable them", "-maltivec");
11782 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11783 even if it is going to be passed in a vector register.
11784 Darwin does the same for variable-argument functions. */
11785 if (((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
11787 || (cum
->stdarg
&& DEFAULT_ABI
!= ABI_V4
))
11797 /* Vector parameters must be 16-byte aligned. In 32-bit
11798 mode this means we need to take into account the offset
11799 to the parameter save area. In 64-bit mode, they just
11800 have to start on an even word, since the parameter save
11801 area is 16-byte aligned. */
11803 align
= -(rs6000_parm_offset () + cum
->words
) & 3;
11805 align
= cum
->words
& 1;
11806 cum
->words
+= align
+ rs6000_arg_size (mode
, type
);
11808 if (TARGET_DEBUG_ARG
)
11810 fprintf (stderr
, "function_adv: words = %2d, align=%d, ",
11811 cum
->words
, align
);
11812 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s\n",
11813 cum
->nargs_prototype
, cum
->prototype
,
11814 GET_MODE_NAME (mode
));
11818 else if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
11820 int size
= int_size_in_bytes (type
);
11821 /* Variable sized types have size == -1 and are
11822 treated as if consisting entirely of ints.
11823 Pad to 16 byte boundary if needed. */
11824 if (TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
11825 && (cum
->words
% 2) != 0)
11827 /* For varargs, we can just go up by the size of the struct. */
11829 cum
->words
+= (size
+ 7) / 8;
11832 /* It is tempting to say int register count just goes up by
11833 sizeof(type)/8, but this is wrong in a case such as
11834 { int; double; int; } [powerpc alignment]. We have to
11835 grovel through the fields for these too. */
11836 cum
->intoffset
= 0;
11837 cum
->floats_in_gpr
= 0;
11838 rs6000_darwin64_record_arg_advance_recurse (cum
, type
, 0);
11839 rs6000_darwin64_record_arg_advance_flush (cum
,
11840 size
* BITS_PER_UNIT
, 1);
11842 if (TARGET_DEBUG_ARG
)
11844 fprintf (stderr
, "function_adv: words = %2d, align=%d, size=%d",
11845 cum
->words
, TYPE_ALIGN (type
), size
);
11847 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11848 cum
->nargs_prototype
, cum
->prototype
,
11849 GET_MODE_NAME (mode
));
11852 else if (DEFAULT_ABI
== ABI_V4
)
11854 if (abi_v4_pass_in_fpr (mode
))
11856 /* _Decimal128 must use an even/odd register pair. This assumes
11857 that the register number is odd when fregno is odd. */
11858 if (mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11861 if (cum
->fregno
+ (FLOAT128_2REG_P (mode
) ? 1 : 0)
11862 <= FP_ARG_V4_MAX_REG
)
11863 cum
->fregno
+= (GET_MODE_SIZE (mode
) + 7) >> 3;
11866 cum
->fregno
= FP_ARG_V4_MAX_REG
+ 1;
11867 if (mode
== DFmode
|| FLOAT128_IBM_P (mode
)
11868 || mode
== DDmode
|| mode
== TDmode
)
11869 cum
->words
+= cum
->words
& 1;
11870 cum
->words
+= rs6000_arg_size (mode
, type
);
11875 int n_words
= rs6000_arg_size (mode
, type
);
11876 int gregno
= cum
->sysv_gregno
;
11878 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11879 As does any other 2 word item such as complex int due to a
11880 historical mistake. */
11882 gregno
+= (1 - gregno
) & 1;
11884 /* Multi-reg args are not split between registers and stack. */
11885 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
11887 /* Long long is aligned on the stack. So are other 2 word
11888 items such as complex int due to a historical mistake. */
11890 cum
->words
+= cum
->words
& 1;
11891 cum
->words
+= n_words
;
11894 /* Note: continuing to accumulate gregno past when we've started
11895 spilling to the stack indicates the fact that we've started
11896 spilling to the stack to expand_builtin_saveregs. */
11897 cum
->sysv_gregno
= gregno
+ n_words
;
11900 if (TARGET_DEBUG_ARG
)
11902 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
11903 cum
->words
, cum
->fregno
);
11904 fprintf (stderr
, "gregno = %2d, nargs = %4d, proto = %d, ",
11905 cum
->sysv_gregno
, cum
->nargs_prototype
, cum
->prototype
);
11906 fprintf (stderr
, "mode = %4s, named = %d\n",
11907 GET_MODE_NAME (mode
), named
);
11912 int n_words
= rs6000_arg_size (mode
, type
);
11913 int start_words
= cum
->words
;
11914 int align_words
= rs6000_parm_start (mode
, type
, start_words
);
11916 cum
->words
= align_words
+ n_words
;
11918 if (SCALAR_FLOAT_MODE_P (elt_mode
) && TARGET_HARD_FLOAT
)
11920 /* _Decimal128 must be passed in an even/odd float register pair.
11921 This assumes that the register number is odd when fregno is
11923 if (elt_mode
== TDmode
&& (cum
->fregno
% 2) == 1)
11925 cum
->fregno
+= n_elts
* ((GET_MODE_SIZE (elt_mode
) + 7) >> 3);
11928 if (TARGET_DEBUG_ARG
)
11930 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
11931 cum
->words
, cum
->fregno
);
11932 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s, ",
11933 cum
->nargs_prototype
, cum
->prototype
, GET_MODE_NAME (mode
));
11934 fprintf (stderr
, "named = %d, align = %d, depth = %d\n",
11935 named
, align_words
- start_words
, depth
);
11941 rs6000_function_arg_advance (cumulative_args_t cum
, machine_mode mode
,
11942 const_tree type
, bool named
)
11944 rs6000_function_arg_advance_1 (get_cumulative_args (cum
), mode
, type
, named
,
11948 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11949 structure between cum->intoffset and bitpos to integer registers. */
11952 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS
*cum
,
11953 HOST_WIDE_INT bitpos
, rtx rvec
[], int *k
)
11956 unsigned int regno
;
11957 unsigned int startbit
, endbit
;
11958 int this_regno
, intregs
, intoffset
;
11961 if (cum
->intoffset
== -1)
11964 intoffset
= cum
->intoffset
;
11965 cum
->intoffset
= -1;
11967 /* If this is the trailing part of a word, try to only load that
11968 much into the register. Otherwise load the whole register. Note
11969 that in the latter case we may pick up unwanted bits. It's not a
11970 problem at the moment but may wish to revisit. */
11972 if (intoffset
% BITS_PER_WORD
!= 0)
11974 mode
= mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
11976 if (mode
== BLKmode
)
11978 /* We couldn't find an appropriate mode, which happens,
11979 e.g., in packed structs when there are 3 bytes to load.
11980 Back intoffset back to the beginning of the word in this
11982 intoffset
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11989 startbit
= ROUND_DOWN (intoffset
, BITS_PER_WORD
);
11990 endbit
= ROUND_UP (bitpos
, BITS_PER_WORD
);
11991 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
11992 this_regno
= cum
->words
+ intoffset
/ BITS_PER_WORD
;
11994 if (intregs
> 0 && intregs
> GP_ARG_NUM_REG
- this_regno
)
11995 cum
->use_stack
= 1;
11997 intregs
= MIN (intregs
, GP_ARG_NUM_REG
- this_regno
);
12001 intoffset
/= BITS_PER_UNIT
;
12004 regno
= GP_ARG_MIN_REG
+ this_regno
;
12005 reg
= gen_rtx_REG (mode
, regno
);
12007 gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
12010 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
12014 while (intregs
> 0);
12017 /* Recursive workhorse for the following. */
12020 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS
*cum
, const_tree type
,
12021 HOST_WIDE_INT startbitpos
, rtx rvec
[],
12026 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
12027 if (TREE_CODE (f
) == FIELD_DECL
)
12029 HOST_WIDE_INT bitpos
= startbitpos
;
12030 tree ftype
= TREE_TYPE (f
);
12032 if (ftype
== error_mark_node
)
12034 mode
= TYPE_MODE (ftype
);
12036 if (DECL_SIZE (f
) != 0
12037 && tree_fits_uhwi_p (bit_position (f
)))
12038 bitpos
+= int_bit_position (f
);
12040 /* ??? FIXME: else assume zero offset. */
12042 if (TREE_CODE (ftype
) == RECORD_TYPE
)
12043 rs6000_darwin64_record_arg_recurse (cum
, ftype
, bitpos
, rvec
, k
);
12044 else if (cum
->named
&& USE_FP_FOR_ARG_P (cum
, mode
))
12046 unsigned n_fpreg
= (GET_MODE_SIZE (mode
) + 7) >> 3;
12050 case SCmode
: mode
= SFmode
; break;
12051 case DCmode
: mode
= DFmode
; break;
12052 case TCmode
: mode
= TFmode
; break;
12056 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
12057 if (cum
->fregno
+ n_fpreg
> FP_ARG_MAX_REG
+ 1)
12059 gcc_assert (cum
->fregno
== FP_ARG_MAX_REG
12060 && (mode
== TFmode
|| mode
== TDmode
));
12061 /* Long double or _Decimal128 split over regs and memory. */
12062 mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
: DFmode
;
12066 = gen_rtx_EXPR_LIST (VOIDmode
,
12067 gen_rtx_REG (mode
, cum
->fregno
++),
12068 GEN_INT (bitpos
/ BITS_PER_UNIT
));
12069 if (FLOAT128_2REG_P (mode
))
12072 else if (cum
->named
&& USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
12074 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
12076 = gen_rtx_EXPR_LIST (VOIDmode
,
12077 gen_rtx_REG (mode
, cum
->vregno
++),
12078 GEN_INT (bitpos
/ BITS_PER_UNIT
));
12080 else if (cum
->intoffset
== -1)
12081 cum
->intoffset
= bitpos
;
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   registers, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */
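/* Illustrative sketch (not taken from the original sources): for a
   struct such as { double d; long l; } passed by value under darwin64,
   the PARALLEL built below would look roughly like

     (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                    (expr_list (reg:DI r4) (const_int 8))])

   i.e. one EXPR_LIST per register chunk, each paired with the byte
   offset of that chunk within the argument block; a leading
   (expr_list (nil) (const_int 0)) element is prepended when part of
   the argument also has to live in memory.  */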
static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
                            bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below. */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0,
                                      rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
        return NULL_RTX;    /* doesn't go in registers at all  */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (machine_mode mode, const_tree type,
                           int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (machine_mode mode, const_tree type,
                           int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
          || mode == BLKmode
          || (TARGET_32BIT && TARGET_POWERPC64))
        {
          /* If this is partially on the stack, then we only
             include the portion actually in registers here.  */
          machine_mode rmode = TARGET_32BIT ? SImode : DImode;
          int i = 0;

          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Not all of the arg fits in gprs.  Say that it goes in memory
               too, using a magic NULL_RTX component.  Also see comment in
               rs6000_mixed_function_arg for why the normal
               function_arg_partial_nregs scheme doesn't work in this case. */
            rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

          do
            {
              rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
              rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }
          while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
        }
      else
        {
          /* The whole arg fits in gprs.  */
          rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
        }
    }
  else
    {
      /* It's entirely in memory.  */
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }

  return k;
}
/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
        return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
        return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */
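/* Illustrative sketch (an assumption for exposition, not taken from the
   sources): a double passed to an unprototyped callee under AIX can end
   up described by a PARALLEL naming both register classes, roughly

     (parallel:DF [(expr_list (reg:DI r3) (const_int 0))
                   (expr_list (reg:DF f1) (const_int 0))])

   so the caller materializes the value wherever the callee might expect
   to find it.  */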
static rtx
rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
                     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
          && (cum->call_cookie & CALL_LIBCALL) == 0
          && (cum->stdarg
              || (cum->nargs_prototype < 0
                  && (cum->prototype || TARGET_NO_PROTOTYPE)))
          && TARGET_HARD_FLOAT)
        return GEN_INT (cum->call_cookie
                        | ((cum->fregno == FP_ARG_MIN_REG)
                           ? CALL_V4_SET_FP_ARGS
                           : CALL_V4_CLEAR_FP_ARGS));

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
      if (rslt != NULL_RTX)
        return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter save area?
         Library support functions for IEEE 128-bit are assumed to not need the
         value passed both in GPRs and in vector registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        {
          int align_words = ROUND_UP (cum->words, 2);
          k = rs6000_psave_function_arg (mode, type, align_words, rvec);
        }

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
        {
          r = gen_rtx_REG (elt_mode, cum->vregno + i);
          off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
        }

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
           && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
               || (type && TREE_CODE (type) == VECTOR_TYPE
                   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
        return NULL_RTX;
      else
        {
          /* Vector parameters to varargs functions under AIX or Darwin
             get passed in memory and possibly also in GPRs.  */
          int align, align_words, n_words;
          machine_mode part_mode;

          /* Vector parameters must be 16-byte aligned.  In 32-bit
             mode this means we need to take into account the offset
             to the parameter save area.  In 64-bit mode, they just
             have to start on an even word, since the parameter save
             area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = -(rs6000_parm_offset () + cum->words) & 3;
          else
            align = cum->words & 1;
          align_words = cum->words + align;

          /* Out of registers?  Memory, then.  */
          if (align_words >= GP_ARG_NUM_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          /* The vector value goes in GPRs.  Only the part of the
             value in GPRs is reported here.  */
          part_mode = mode;
          n_words = rs6000_arg_size (mode, type);
          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Fortunately, there are only two possibilities, the value
               is either wholly in GPRs or half in GPRs and half not.  */
            part_mode = DImode;

          return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
        }
    }
  else if (abi == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            return gen_rtx_REG (mode, cum->fregno);
          else
            return NULL_RTX;
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
             As does any other 2 word item such as complex int due to a
             historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type,
                                              gregno - GP_ARG_MIN_REG);
          return gen_rtx_REG (mode, gregno);
        }
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
         This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
        cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
        {
          rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
          rtx r, off;
          int i, k = 0;
          unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
          int fpr_words;

          /* Do we also need to pass this argument in the parameter
             save area?  */
          if (type && (cum->nargs_prototype <= 0
                       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                           && TARGET_XL_COMPAT
                           && align_words >= GP_ARG_NUM_REG)))
            k = rs6000_psave_function_arg (mode, type, align_words, rvec);

          /* Describe where this argument goes in the fprs.  */
          for (i = 0; i < n_elts
                      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
            {
              /* Check if the argument is split over registers and memory.
                 This can only ever happen for long double or _Decimal128;
                 complex types are handled via split_complex_arg.  */
              machine_mode fmode = elt_mode;
              if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
                {
                  gcc_assert (FLOAT128_2REG_P (fmode));
                  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
                }

              r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
              off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }

          /* If there were not enough FPRs to hold the argument, the rest
             usually goes into memory.  However, if the current position
             is still within the register parameter area, a portion may
             actually have to go into GPRs.

             Note that it may happen that the portion of the argument
             passed in the first "half" of the first GPR was already
             passed in the last FPR as well.

             For unnamed arguments, we already set up GPRs to cover the
             whole argument in rs6000_psave_function_arg, so there is
             nothing further to do at this point.  */
          fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
          if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
              && cum->nargs_prototype > 0)
            {
              static bool warned;

              machine_mode rmode = TARGET_32BIT ? SImode : DImode;
              int n_words = rs6000_arg_size (mode, type);

              align_words += fpr_words;
              n_words -= fpr_words;

              do
                {
                  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
                  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
                  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
                }
              while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

              if (!warned && warn_psabi)
                {
                  warned = true;
                  inform (input_location,
                          "the ABI of passing homogeneous float aggregates"
                          " has changed in GCC 5");
                }
            }

          return rs6000_finish_function_arg (mode, rvec, k);
        }
      else if (align_words < GP_ARG_NUM_REG)
        {
          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
        }
      else
        return NULL_RTX;
    }
}
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */
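/* Worked example (illustrative, following the description above): in
   64-bit mode, if only two GPRs remain free and a 24-byte BLKmode
   struct arrives, 16 bytes go in registers and 8 in memory, so this
   hook would report 16; an argument that is wholly in registers or
   wholly in memory reports 0.  */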
static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                          tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
         memory) as well as VRs, we do not use the partial bytes mechanism;
         instead, rs6000_function_arg will return a PARALLEL including a memory
         element as necessary.  Library support functions for IEEE 128-bit are
         assumed to not need the value passed both in GPRs and in vector
         registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
        ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
         (gprs or memory) as well as FPRs, we do not use the partial
         bytes mechanism; instead, rs6000_function_arg will return a
         PARALLEL including a memory element as necessary.  */
      if (type
          && (cum->nargs_prototype <= 0
              || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                  && TARGET_XL_COMPAT
                  && align_words >= GP_ARG_NUM_REG)))
        return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
        {
          /* Compute number of bytes / words passed in FPRs.  If there
             is still space available in the register parameter area
             *after* that amount, a part of the argument will be passed
             in GPRs.  In that case, the total amount passed in any
             registers is equal to the amount that would have been passed
             in GPRs if everything were passed there, so we fall back to
             the GPR code below to compute the appropriate value.  */
          int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
                     * MIN (8, GET_MODE_SIZE (elt_mode)));
          int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

          if (align_words + fpr_words < GP_ARG_NUM_REG)
            passed_in_gprs = true;
          else
            ret = fpr;
        }
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                          machine_mode mode, const_tree type,
                          bool named ATTRIBUTE_UNUSED)
{
  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
      return 1;
    }

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
        {
          warning (OPT_Wpsabi, "GCC vector passed by reference: "
                   "non-standard ABI extension with no compatibility "
                   "guarantee");
          warned_for_pass_big_vectors = true;
        }
      return 1;
    }

  return 0;
}
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
            || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
                         TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
        result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
           parm && parm != void_list_node;
           parm = TREE_CHAIN (parm))
        if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
          return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
        if (rs6000_parm_needs_stack (args_so_far, arg_type))
          return true;
    }

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */
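/* Illustrative note (not taken from the sources): under ELFv2 a call
   like  int f (int, int);  f (1, 2);  passes everything in r3/r4, so
   the hook below can report 0 and the caller need not reserve the
   64-byte parameter save area in its frame, while an unprototyped or
   variadic callee still forces the full reservation.  */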
static int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
         a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
        reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
        reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
        {
          if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
            tem = NULL_RTX;
          else
            tem = simplify_gen_subreg (reg_mode, x, BLKmode,
                                       i * GET_MODE_SIZE (reg_mode));
        }
      else
        tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any needed actions for a function that is receiving a
   variable number of arguments.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
                        tree type, int *pretend_size ATTRIBUTE_UNUSED,
                        int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
        {
          int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
          HOST_WIDE_INT offset = 0;

          /* Try to optimize the size of the varargs save area.
             The ABI requires that ap.reg_save_area is doubleword
             aligned, but we don't need to allocate space for all
             the bytes, only those to which we actually will save
             anything.  */
          if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
            gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
          if (TARGET_HARD_FLOAT
              && next_cum.fregno <= FP_ARG_V4_MAX_REG
              && cfun->va_list_fpr_size)
            {
              if (gpr_reg_num)
                fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
                           * UNITS_PER_FP_WORD;
              if (cfun->va_list_fpr_size
                  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
              else
                fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                            * UNITS_PER_FP_WORD;
            }
          if (gpr_reg_num)
            {
              offset = -((first_reg_offset * reg_size) & ~7);
              if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
                {
                  gpr_reg_num = cfun->va_list_gpr_size;
                  if (reg_size == 4 && (first_reg_offset & 1))
                    gpr_reg_num++;
                }
              gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
            }
          else if (fpr_size)
            offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
                       * UNITS_PER_FP_WORD
                     - (int) (GP_ARG_NUM_REG * reg_size);

          if (gpr_size + fpr_size)
            {
              rtx reg_save_area
                = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
              gcc_assert (GET_CODE (reg_save_area) == MEM);
              reg_save_area = XEXP (reg_save_area, 0);
              if (GET_CODE (reg_save_area) == PLUS)
                {
                  gcc_assert (XEXP (reg_save_area, 0)
                              == virtual_stack_vars_rtx);
                  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
                  offset += INTVAL (XEXP (reg_save_area, 1));
                }
              else
                gcc_assert (reg_save_area == virtual_stack_vars_rtx);
            }

          cfun->machine->varargs_save_offset = offset;
          save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
        }
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = crtl->args.internal_arg_pointer;

      if (targetm.calls.must_pass_in_stack (mode, type))
        first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
        /* V4 va_list_gpr_size counts number of registers needed.  */
        n_gpr = cfun->va_list_gpr_size;
      else
        /* char * va_list instead counts number of bytes needed.  */
        n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
        nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
                         plus_constant (Pmode, save_area,
                                        first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
                                  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
                                               * UNITS_PER_FP_WORD);

      emit_jump_insn
        (gen_rtx_SET (pc_rtx,
                      gen_rtx_IF_THEN_ELSE (VOIDmode,
                                            gen_rtx_NE (VOIDmode, cr1,
                                                        const0_rtx),
                                            gen_rtx_LABEL_REF (VOIDmode, lab),
                                            pc_rtx)));

      for (nregs = 0;
           fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
           fregno++, off += UNITS_PER_FP_WORD, nregs++)
        {
          mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                             ? DFmode : SFmode,
                             plus_constant (Pmode, save_area, off));
          MEM_NOTRAP_P (mem) = 1;
          set_mem_alias_set (mem, set);
          set_mem_align (mem, GET_MODE_ALIGNMENT (
                         (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                          ? DFmode : SFmode));
          emit_move_insn (mem, gen_rtx_REG (
                          (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                           ? DFmode : SFmode, fregno));
        }

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
                          get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
                      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
                      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
               GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
               FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
             HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
             words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
                  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
                  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (call_ABI_of_interest (cfun->decl))
    rs6000_passes_float = true;
#endif

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
  t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                        gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple *stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (((TARGET_MACHO
        && rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
        boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
        {
          /* This updates arg ptr by the amount that would be necessary
             to align the zero-sized (but not zero-alignment) item.  */
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
          gimplify_and_add (t, pre_p);

          t = fold_convert (sizetype, valist_tmp);
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_convert (TREE_TYPE (valist),
                                    fold_build2 (BIT_AND_EXPR, sizetype, t,
                                                 size_int (-boundary))));
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
          gimplify_and_add (t, pre_p);
        }
      /* Since it is zero-sized there's no increment for the item itself. */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
        {
          tree elem_type = TREE_TYPE (type);
          machine_mode elem_mode = TYPE_MODE (elem_type);
          int elem_size = GET_MODE_SIZE (elem_mode);

          if (elem_size < UNITS_PER_WORD)
            {
              tree real_part, imag_part;
              gimple_seq post = NULL;

              real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  &post);
              /* Copy the value into a temporary, lest the formal temporary
                 be reused out from under us.  */
              real_part = get_initialized_tmp_var (real_part, pre_p, &post);
              gimple_seq_add_seq (pre_p, post);

              imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  post_p);

              return build2 (COMPLEX_EXPR, type, real_part, imag_part);
            }
        }

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  int pad = 4 * rsize - size;
  align = 1;

  machine_mode mode = TYPE_MODE (type);
  if (abi_v4_pass_in_fpr (mode))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (mode != SFmode && mode != SDmode)
        align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
        align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /*  AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long is aligned in the registers.  As are any other 2 gpr
         item such as complex int due to a historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
        {
          regalign = 1;
          u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), n_reg - 1));
          u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
                      unshare_expr (reg), u);
        }
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
         reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && mode == TDmode)
        {
          t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), 1));
          u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
        }

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
         FP register for 32-bit binaries.  */
      if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
        t = fold_build_pointer_plus_hwi (t, size);

      /* Args are passed right-aligned.  */
      if (BYTES_BIG_ENDIAN)
        t = fold_build_pointer_plus_hwi (t, pad);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
        {
          /* Ensure that we don't find any more args in regs.
             Alignment has taken care of for special cases.  */
          gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
        }
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
                  build_int_cst (TREE_TYPE (t), -align));
    }

  /* Args are passed right-aligned.  */
  if (BYTES_BIG_ENDIAN)
    t = fold_build_pointer_plus_hwi (t, pad);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
          > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
         aligned in memory in the saved registers, so copy via a
         temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
                                   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error (input_location,
                 "internal error: builtin function %qs already processed",
                 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
         external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
         function as not reading global memory, but it can have arbitrary side
         effects.  If it is off, then assume the function is a const function.
         This mimics the ATTR_MATHFN_FPROUNDING attribute in
         builtin-attribute.def that is used for the math functions. */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
        {
          DECL_PURE_P (t) = 1;
          DECL_IS_NOVOPS (t) = 1;
          attr_string = ", fp, pure";
        }
      else
        {
          TREE_READONLY (t) = 1;
          attr_string = ", fp, const";
        }
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
             (int)code, name, attr_string);
}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};
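/* How the X-macro pattern above works (a schematic illustration, not a
   literal entry from rs6000-builtin.def): an entry of the form

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       MASK, ATTR, CODE_FOR_xxx)

   expands to one initializer { MASK, ICODE, NAME, ENUM } inside
   bdesc_3arg, while every other RS6000_BUILTIN_* macro expands to
   nothing.  Each table below redefines which macro is "live" and then
   re-includes the same .def file, so one list of builtins populates all
   of the per-arity tables in turn.  */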
13569 /* DST operations: void foo (void *, const int, const char). */
13571 #undef RS6000_BUILTIN_0
13572 #undef RS6000_BUILTIN_1
13573 #undef RS6000_BUILTIN_2
13574 #undef RS6000_BUILTIN_3
13575 #undef RS6000_BUILTIN_A
13576 #undef RS6000_BUILTIN_D
13577 #undef RS6000_BUILTIN_H
13578 #undef RS6000_BUILTIN_P
13579 #undef RS6000_BUILTIN_Q
13580 #undef RS6000_BUILTIN_X
13582 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13583 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13584 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13585 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13586 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13587 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13588 { MASK, ICODE, NAME, ENUM },
13590 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13591 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13592 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13593 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};
13600 /* Simple binary operations: VECc = foo (VECa, VECb). */
13602 #undef RS6000_BUILTIN_0
13603 #undef RS6000_BUILTIN_1
13604 #undef RS6000_BUILTIN_2
13605 #undef RS6000_BUILTIN_3
13606 #undef RS6000_BUILTIN_A
13607 #undef RS6000_BUILTIN_D
13608 #undef RS6000_BUILTIN_H
13609 #undef RS6000_BUILTIN_P
13610 #undef RS6000_BUILTIN_Q
13611 #undef RS6000_BUILTIN_X
13613 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13614 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13615 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13616 { MASK, ICODE, NAME, ENUM },
13618 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13619 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13620 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13621 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13622 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13623 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13624 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};
13631 #undef RS6000_BUILTIN_0
13632 #undef RS6000_BUILTIN_1
13633 #undef RS6000_BUILTIN_2
13634 #undef RS6000_BUILTIN_3
13635 #undef RS6000_BUILTIN_A
13636 #undef RS6000_BUILTIN_D
13637 #undef RS6000_BUILTIN_H
13638 #undef RS6000_BUILTIN_P
13639 #undef RS6000_BUILTIN_Q
13640 #undef RS6000_BUILTIN_X
13642 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13643 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13644 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13645 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13646 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13647 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13648 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13649 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13650 { MASK, ICODE, NAME, ENUM },
13652 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13653 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13655 /* AltiVec predicates. */
static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};
13662 /* PAIRED predicates. */
13663 #undef RS6000_BUILTIN_0
13664 #undef RS6000_BUILTIN_1
13665 #undef RS6000_BUILTIN_2
13666 #undef RS6000_BUILTIN_3
13667 #undef RS6000_BUILTIN_A
13668 #undef RS6000_BUILTIN_D
13669 #undef RS6000_BUILTIN_H
13670 #undef RS6000_BUILTIN_P
13671 #undef RS6000_BUILTIN_Q
13672 #undef RS6000_BUILTIN_X
13674 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13675 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13676 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13677 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13678 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13679 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13680 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13681 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13682 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13683 { MASK, ICODE, NAME, ENUM },
13685 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};
13692 /* ABS* operations. */
13694 #undef RS6000_BUILTIN_0
13695 #undef RS6000_BUILTIN_1
13696 #undef RS6000_BUILTIN_2
13697 #undef RS6000_BUILTIN_3
13698 #undef RS6000_BUILTIN_A
13699 #undef RS6000_BUILTIN_D
13700 #undef RS6000_BUILTIN_H
13701 #undef RS6000_BUILTIN_P
13702 #undef RS6000_BUILTIN_Q
13703 #undef RS6000_BUILTIN_X
13705 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13706 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13707 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13708 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13709 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13710 { MASK, ICODE, NAME, ENUM },
13712 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13713 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13714 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13715 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13716 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */
13726 #undef RS6000_BUILTIN_0
13727 #undef RS6000_BUILTIN_1
13728 #undef RS6000_BUILTIN_2
13729 #undef RS6000_BUILTIN_3
13730 #undef RS6000_BUILTIN_A
13731 #undef RS6000_BUILTIN_D
13732 #undef RS6000_BUILTIN_H
13733 #undef RS6000_BUILTIN_P
13734 #undef RS6000_BUILTIN_Q
13735 #undef RS6000_BUILTIN_X
13737 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13738 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13739 { MASK, ICODE, NAME, ENUM },
13741 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13742 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13743 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13744 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13745 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13746 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13747 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13748 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};
13755 /* Simple no-argument operations: result = __builtin_darn_32 () */
13757 #undef RS6000_BUILTIN_0
13758 #undef RS6000_BUILTIN_1
13759 #undef RS6000_BUILTIN_2
13760 #undef RS6000_BUILTIN_3
13761 #undef RS6000_BUILTIN_A
13762 #undef RS6000_BUILTIN_D
13763 #undef RS6000_BUILTIN_H
13764 #undef RS6000_BUILTIN_P
13765 #undef RS6000_BUILTIN_Q
13766 #undef RS6000_BUILTIN_X
13768 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13769 { MASK, ICODE, NAME, ENUM },
13771 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13772 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13773 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13774 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13775 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13776 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13777 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13778 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13779 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_0arg[] =
{
#include "rs6000-builtin.def"
};
13786 /* HTM builtins. */
13787 #undef RS6000_BUILTIN_0
13788 #undef RS6000_BUILTIN_1
13789 #undef RS6000_BUILTIN_2
13790 #undef RS6000_BUILTIN_3
13791 #undef RS6000_BUILTIN_A
13792 #undef RS6000_BUILTIN_D
13793 #undef RS6000_BUILTIN_H
13794 #undef RS6000_BUILTIN_P
13795 #undef RS6000_BUILTIN_Q
13796 #undef RS6000_BUILTIN_X
13798 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13799 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13800 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13801 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13802 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13803 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13804 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13805 { MASK, ICODE, NAME, ENUM },
13807 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13808 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13809 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};
13816 #undef RS6000_BUILTIN_0
13817 #undef RS6000_BUILTIN_1
13818 #undef RS6000_BUILTIN_2
13819 #undef RS6000_BUILTIN_3
13820 #undef RS6000_BUILTIN_A
13821 #undef RS6000_BUILTIN_D
13822 #undef RS6000_BUILTIN_H
13823 #undef RS6000_BUILTIN_P
13824 #undef RS6000_BUILTIN_Q
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

const char *
rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
{
  return rs6000_builtin_info[(int)fncode].name;
}
/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
);
static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
13985 rs6000_expand_binop_builtin (enum insn_code icode
, tree exp
, rtx target
)
13988 tree arg0
= CALL_EXPR_ARG (exp
, 0);
13989 tree arg1
= CALL_EXPR_ARG (exp
, 1);
13990 rtx op0
= expand_normal (arg0
);
13991 rtx op1
= expand_normal (arg1
);
13992 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
13993 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
13994 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
13996 if (icode
== CODE_FOR_nothing
)
13997 /* Builtin not supported on this processor. */
14000 /* If we got invalid arguments bail out before generating bad rtl. */
14001 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14008 case CODE_FOR_altivec_vcfux
:
14009 case CODE_FOR_altivec_vcfsx
:
14010 case CODE_FOR_altivec_vctsxs
:
14011 case CODE_FOR_altivec_vctuxs
:
14012 case CODE_FOR_altivec_vspltb
:
14013 case CODE_FOR_altivec_vsplth
:
14014 case CODE_FOR_altivec_vspltw
:
14015 /* Only allow 5-bit unsigned literals. */
14017 if (TREE_CODE (arg1
) != INTEGER_CST
14018 || TREE_INT_CST_LOW (arg1
) & ~0x1f)
14020 error ("argument 2 must be a 5-bit unsigned literal");
14021 return CONST0_RTX (tmode
);
14024 case CODE_FOR_dfptstsfi_eq_dd
:
14025 case CODE_FOR_dfptstsfi_lt_dd
:
14026 case CODE_FOR_dfptstsfi_gt_dd
:
14027 case CODE_FOR_dfptstsfi_unordered_dd
:
14028 case CODE_FOR_dfptstsfi_eq_td
:
14029 case CODE_FOR_dfptstsfi_lt_td
:
14030 case CODE_FOR_dfptstsfi_gt_td
:
14031 case CODE_FOR_dfptstsfi_unordered_td
:
14032 /* Only allow 6-bit unsigned literals. */
14034 if (TREE_CODE (arg0
) != INTEGER_CST
14035 || !IN_RANGE (TREE_INT_CST_LOW (arg0
), 0, 63))
14037 error ("argument 1 must be a 6-bit unsigned literal");
14038 return CONST0_RTX (tmode
);
14041 case CODE_FOR_xststdcqp
:
14042 case CODE_FOR_xststdcdp
:
14043 case CODE_FOR_xststdcsp
:
14044 case CODE_FOR_xvtstdcdp
:
14045 case CODE_FOR_xvtstdcsp
:
14046 /* Only allow 7-bit unsigned literals. */
14048 if (TREE_CODE (arg1
) != INTEGER_CST
14049 || !IN_RANGE (TREE_INT_CST_LOW (arg1
), 0, 127))
14051 error ("argument 2 must be a 7-bit unsigned literal");
14052 return CONST0_RTX (tmode
);
14055 case CODE_FOR_unpackv1ti
:
14056 case CODE_FOR_unpackkf
:
14057 case CODE_FOR_unpacktf
:
14058 case CODE_FOR_unpackif
:
14059 case CODE_FOR_unpacktd
:
14060 /* Only allow 1-bit unsigned literals. */
14062 if (TREE_CODE (arg1
) != INTEGER_CST
14063 || !IN_RANGE (TREE_INT_CST_LOW (arg1
), 0, 1))
14065 error ("argument 2 must be a 1-bit unsigned literal");
14066 return CONST0_RTX (tmode
);
14072 || GET_MODE (target
) != tmode
14073 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14074 target
= gen_reg_rtx (tmode
);
14076 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14077 op0
= copy_to_mode_reg (mode0
, op0
);
14078 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14079 op1
= copy_to_mode_reg (mode1
, op1
);
14081 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
14090 altivec_expand_predicate_builtin (enum insn_code icode
, tree exp
, rtx target
)
14093 tree cr6_form
= CALL_EXPR_ARG (exp
, 0);
14094 tree arg0
= CALL_EXPR_ARG (exp
, 1);
14095 tree arg1
= CALL_EXPR_ARG (exp
, 2);
14096 rtx op0
= expand_normal (arg0
);
14097 rtx op1
= expand_normal (arg1
);
14098 machine_mode tmode
= SImode
;
14099 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14100 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14103 if (TREE_CODE (cr6_form
) != INTEGER_CST
)
14105 error ("argument 1 of %qs must be a constant",
14106 "__builtin_altivec_predicate");
14110 cr6_form_int
= TREE_INT_CST_LOW (cr6_form
);
14112 gcc_assert (mode0
== mode1
);
14114 /* If we have invalid arguments, bail out before generating bad rtl. */
14115 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14119 || GET_MODE (target
) != tmode
14120 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14121 target
= gen_reg_rtx (tmode
);
14123 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14124 op0
= copy_to_mode_reg (mode0
, op0
);
14125 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14126 op1
= copy_to_mode_reg (mode1
, op1
);
14128 /* Note that for many of the relevant operations (e.g. cmpne or
14129 cmpeq) with float or double operands, it makes more sense for the
14130 mode of the allocated scratch register to select a vector of
14131 integer. But the choice to copy the mode of operand 0 was made
14132 long ago and there are no plans to change it. */
14133 scratch
= gen_reg_rtx (mode0
);
14135 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
14140 /* The vec_any* and vec_all* predicates use the same opcodes for two
14141 different operations, but the bits in CR6 will be different
14142 depending on what information we want. So we have to play tricks
14143 with CR6 to get the right bits out.
14145 If you think this is disgusting, look at the specs for the
14146 AltiVec predicates. */
14148 switch (cr6_form_int
)
14151 emit_insn (gen_cr6_test_for_zero (target
));
14154 emit_insn (gen_cr6_test_for_zero_reverse (target
));
14157 emit_insn (gen_cr6_test_for_lt (target
));
14160 emit_insn (gen_cr6_test_for_lt_reverse (target
));
14163 error ("argument 1 of %qs is out of range",
14164 "__builtin_altivec_predicate");
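/* Informal illustration of the CR6 trick described above (user-level view;
   the exact cr6_form_int encodings come from the builtin tables and are not
   repeated here):

     vector int a, b;
     ... vec_all_eq (a, b) ...   // tests the "all elements compared true" bit
     ... vec_any_eq (a, b) ...   // tests (the complement of) the "no elements
				 // compared true" bit

   Both forms expand to the same dot-form compare (e.g. vcmpequw.); only the
   CR6 bit selected by gen_cr6_test_for_zero* / gen_cr6_test_for_lt* above
   differs.  */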
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op1);
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Return a constant vector for use as a little-endian permute control vector
   to reverse the order of elements of the given vector mode.  */
static rtx
swap_selector_for_mode (machine_mode mode)
{
  /* These are little endian vectors, so their elements are reversed
     from what you would normally expect for a permute control vector.  */
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
      swaparray = swap2;
      break;
    case V4SFmode:
    case V4SImode:
      swaparray = swap4;
      break;
    case V8HImode:
      swaparray = swap8;
      break;
    case V16QImode:
      swaparray = swap16;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode,
		    gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
}
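/* Informal example of what the selector built above achieves: for a V4SImode
   value with elements <0,1,2,3>, the vperm emitted by the *_be expanders below
   with this control vector produces <3,2,1,0>, reversing the element order
   while leaving the bytes within each element in place.  The tables look
   unusual because they are themselves written in little-endian byte order, as
   the comment above notes.  */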
/* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
   with -maltivec=be specified.  Issue the load followed by an element-
   reversing permute.  */
void
altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx load = gen_rtx_SET (tmp, op1);
  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);

  gcc_assert (REG_P (op0));
  emit_insn (par);
  emit_insn (gen_rtx_SET (op0, vperm));
}
/* Generate code for a "stvxl" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx store = gen_rtx_SET (op0, tmp);
  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with
   -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
{
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx tmp = gen_reg_rtx (mode);
  rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (tmp, vperm));
  emit_insn (gen_rtx_SET (op0, stvx));
}
14312 altivec_expand_lv_builtin (enum insn_code icode
, tree exp
, rtx target
, bool blk
)
14315 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14316 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14317 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14318 machine_mode mode0
= Pmode
;
14319 machine_mode mode1
= Pmode
;
14320 rtx op0
= expand_normal (arg0
);
14321 rtx op1
= expand_normal (arg1
);
14323 if (icode
== CODE_FOR_nothing
)
14324 /* Builtin not supported on this processor. */
14327 /* If we got invalid arguments bail out before generating bad rtl. */
14328 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14332 || GET_MODE (target
) != tmode
14333 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14334 target
= gen_reg_rtx (tmode
);
14336 op1
= copy_to_mode_reg (mode1
, op1
);
14338 /* For LVX, express the RTL accurately by ANDing the address with -16.
14339 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14340 so the raw address is fine. */
14341 if (icode
== CODE_FOR_altivec_lvx_v2df_2op
14342 || icode
== CODE_FOR_altivec_lvx_v2di_2op
14343 || icode
== CODE_FOR_altivec_lvx_v4sf_2op
14344 || icode
== CODE_FOR_altivec_lvx_v4si_2op
14345 || icode
== CODE_FOR_altivec_lvx_v8hi_2op
14346 || icode
== CODE_FOR_altivec_lvx_v16qi_2op
)
14349 if (op0
== const0_rtx
)
14353 op0
= copy_to_mode_reg (mode0
, op0
);
14354 rawaddr
= gen_rtx_PLUS (Pmode
, op1
, op0
);
14356 addr
= gen_rtx_AND (Pmode
, rawaddr
, gen_rtx_CONST_INT (Pmode
, -16));
14357 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, addr
);
14359 /* For -maltivec=be, emit the load and follow it up with a
14360 permute to swap the elements. */
14361 if (!BYTES_BIG_ENDIAN
&& VECTOR_ELT_ORDER_BIG
)
14363 rtx temp
= gen_reg_rtx (tmode
);
14364 emit_insn (gen_rtx_SET (temp
, addr
));
14366 rtx sel
= swap_selector_for_mode (tmode
);
14367 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, temp
, temp
, sel
),
14369 emit_insn (gen_rtx_SET (target
, vperm
));
14372 emit_insn (gen_rtx_SET (target
, addr
));
14376 if (op0
== const0_rtx
)
14377 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, op1
);
14380 op0
= copy_to_mode_reg (mode0
, op0
);
14381 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
,
14382 gen_rtx_PLUS (Pmode
, op1
, op0
));
14385 pat
= GEN_FCN (icode
) (target
, addr
);
14395 altivec_expand_xl_be_builtin (enum insn_code icode
, tree exp
, rtx target
, bool blk
)
14398 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14399 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14400 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14401 machine_mode mode0
= Pmode
;
14402 machine_mode mode1
= Pmode
;
14403 rtx op0
= expand_normal (arg0
);
14404 rtx op1
= expand_normal (arg1
);
14406 if (icode
== CODE_FOR_nothing
)
14407 /* Builtin not supported on this processor. */
14410 /* If we got invalid arguments bail out before generating bad rtl. */
14411 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
14415 || GET_MODE (target
) != tmode
14416 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
14417 target
= gen_reg_rtx (tmode
);
14419 op1
= copy_to_mode_reg (mode1
, op1
);
14421 if (op0
== const0_rtx
)
14422 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
, op1
);
14425 op0
= copy_to_mode_reg (mode0
, op0
);
14426 addr
= gen_rtx_MEM (blk
? BLKmode
: tmode
,
14427 gen_rtx_PLUS (Pmode
, op1
, op0
));
14430 pat
= GEN_FCN (icode
) (target
, addr
);
14435 /* Reverse element order of elements if in LE mode */
14436 if (!VECTOR_ELT_ORDER_BIG
)
14438 rtx sel
= swap_selector_for_mode (tmode
);
14439 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, target
, target
, sel
),
14441 emit_insn (gen_rtx_SET (target
, vperm
));
14447 paired_expand_stv_builtin (enum insn_code icode
, tree exp
)
14449 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14450 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14451 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14452 rtx op0
= expand_normal (arg0
);
14453 rtx op1
= expand_normal (arg1
);
14454 rtx op2
= expand_normal (arg2
);
14456 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14457 machine_mode mode1
= Pmode
;
14458 machine_mode mode2
= Pmode
;
14460 /* Invalid arguments. Bail before doing anything stoopid! */
14461 if (arg0
== error_mark_node
14462 || arg1
== error_mark_node
14463 || arg2
== error_mark_node
)
14466 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, tmode
))
14467 op0
= copy_to_mode_reg (tmode
, op0
);
14469 op2
= copy_to_mode_reg (mode2
, op2
);
14471 if (op1
== const0_rtx
)
14473 addr
= gen_rtx_MEM (tmode
, op2
);
14477 op1
= copy_to_mode_reg (mode1
, op1
);
14478 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op1
, op2
));
14481 pat
= GEN_FCN (icode
) (addr
, op0
);
14488 altivec_expand_stxvl_builtin (enum insn_code icode
, tree exp
)
14491 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14492 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14493 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14494 rtx op0
= expand_normal (arg0
);
14495 rtx op1
= expand_normal (arg1
);
14496 rtx op2
= expand_normal (arg2
);
14497 machine_mode mode0
= insn_data
[icode
].operand
[0].mode
;
14498 machine_mode mode1
= insn_data
[icode
].operand
[1].mode
;
14499 machine_mode mode2
= insn_data
[icode
].operand
[2].mode
;
14501 if (icode
== CODE_FOR_nothing
)
14502 /* Builtin not supported on this processor. */
14505 /* If we got invalid arguments bail out before generating bad rtl. */
14506 if (arg0
== error_mark_node
14507 || arg1
== error_mark_node
14508 || arg2
== error_mark_node
)
14511 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
14512 op0
= copy_to_mode_reg (mode0
, op0
);
14513 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
14514 op1
= copy_to_mode_reg (mode1
, op1
);
14515 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
14516 op2
= copy_to_mode_reg (mode2
, op2
);
14518 pat
= GEN_FCN (icode
) (op0
, op1
, op2
);
14526 altivec_expand_stv_builtin (enum insn_code icode
, tree exp
)
14528 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14529 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14530 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14531 rtx op0
= expand_normal (arg0
);
14532 rtx op1
= expand_normal (arg1
);
14533 rtx op2
= expand_normal (arg2
);
14534 rtx pat
, addr
, rawaddr
;
14535 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14536 machine_mode smode
= insn_data
[icode
].operand
[1].mode
;
14537 machine_mode mode1
= Pmode
;
14538 machine_mode mode2
= Pmode
;
14540 /* Invalid arguments. Bail before doing anything stoopid! */
14541 if (arg0
== error_mark_node
14542 || arg1
== error_mark_node
14543 || arg2
== error_mark_node
)
14546 op2
= copy_to_mode_reg (mode2
, op2
);
14548 /* For STVX, express the RTL accurately by ANDing the address with -16.
14549 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14550 so the raw address is fine. */
14551 if (icode
== CODE_FOR_altivec_stvx_v2df_2op
14552 || icode
== CODE_FOR_altivec_stvx_v2di_2op
14553 || icode
== CODE_FOR_altivec_stvx_v4sf_2op
14554 || icode
== CODE_FOR_altivec_stvx_v4si_2op
14555 || icode
== CODE_FOR_altivec_stvx_v8hi_2op
14556 || icode
== CODE_FOR_altivec_stvx_v16qi_2op
)
14558 if (op1
== const0_rtx
)
14562 op1
= copy_to_mode_reg (mode1
, op1
);
14563 rawaddr
= gen_rtx_PLUS (Pmode
, op2
, op1
);
14566 addr
= gen_rtx_AND (Pmode
, rawaddr
, gen_rtx_CONST_INT (Pmode
, -16));
14567 addr
= gen_rtx_MEM (tmode
, addr
);
14569 op0
= copy_to_mode_reg (tmode
, op0
);
14571 /* For -maltivec=be, emit a permute to swap the elements, followed
14573 if (!BYTES_BIG_ENDIAN
&& VECTOR_ELT_ORDER_BIG
)
14575 rtx temp
= gen_reg_rtx (tmode
);
14576 rtx sel
= swap_selector_for_mode (tmode
);
14577 rtx vperm
= gen_rtx_UNSPEC (tmode
, gen_rtvec (3, op0
, op0
, sel
),
14579 emit_insn (gen_rtx_SET (temp
, vperm
));
14580 emit_insn (gen_rtx_SET (addr
, temp
));
14583 emit_insn (gen_rtx_SET (addr
, op0
));
14587 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, smode
))
14588 op0
= copy_to_mode_reg (smode
, op0
);
14590 if (op1
== const0_rtx
)
14591 addr
= gen_rtx_MEM (tmode
, op2
);
14594 op1
= copy_to_mode_reg (mode1
, op1
);
14595 addr
= gen_rtx_MEM (tmode
, gen_rtx_PLUS (Pmode
, op2
, op1
));
14598 pat
= GEN_FCN (icode
) (addr
, op0
);
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}

/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}

/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
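/* Summary of the mapping implemented by the three helpers above (the *_SPR
   and *_REGNO values come from the target macros and are not repeated here):

     HTM_BUILTIN_{GET,SET}_TFHAR   -> TFHAR_SPR,   TFHAR_REGNO
     HTM_BUILTIN_{GET,SET}_TFIAR   -> TFIAR_SPR,   TFIAR_REGNO
     HTM_BUILTIN_{GET,SET}_TEXASR  -> TEXASR_SPR,  TEXASR_REGNO
     HTM_BUILTIN_{GET,SET}_TEXASRU -> TEXASRU_SPR, TEXASR_REGNO

   TEXASRU shares the TEXASR register number; only the SPR number differs.
   The GET_* variants read through htm_mfspr_{si,di} and the SET_* variants
   write through htm_mtspr_{si,di}, selected by rs6000_htm_spr_icode.  */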
14652 /* Expand the HTM builtin in EXP and store the result in TARGET.
14653 Store true in *EXPANDEDP if we found a builtin to expand. */
14655 htm_expand_builtin (tree exp
, rtx target
, bool * expandedp
)
14657 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
14658 bool nonvoid
= TREE_TYPE (TREE_TYPE (fndecl
)) != void_type_node
;
14659 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
14660 const struct builtin_description
*d
;
14665 if (!TARGET_POWERPC64
14666 && (fcode
== HTM_BUILTIN_TABORTDC
14667 || fcode
== HTM_BUILTIN_TABORTDCI
))
14669 size_t uns_fcode
= (size_t)fcode
;
14670 const char *name
= rs6000_builtin_info
[uns_fcode
].name
;
14671 error ("builtin %qs is only valid in 64-bit mode", name
);
14675 /* Expand the HTM builtins. */
14677 for (i
= 0; i
< ARRAY_SIZE (bdesc_htm
); i
++, d
++)
14678 if (d
->code
== fcode
)
14680 rtx op
[MAX_HTM_OPERANDS
], pat
;
14683 call_expr_arg_iterator iter
;
14684 unsigned attr
= rs6000_builtin_info
[fcode
].attr
;
14685 enum insn_code icode
= d
->icode
;
14686 const struct insn_operand_data
*insn_op
;
14687 bool uses_spr
= (attr
& RS6000_BTC_SPR
);
14691 icode
= rs6000_htm_spr_icode (nonvoid
);
14692 insn_op
= &insn_data
[icode
].operand
[0];
14696 machine_mode tmode
= (uses_spr
) ? insn_op
->mode
: SImode
;
14698 || GET_MODE (target
) != tmode
14699 || (uses_spr
&& !(*insn_op
->predicate
) (target
, tmode
)))
14700 target
= gen_reg_rtx (tmode
);
14702 op
[nopnds
++] = target
;
14705 FOR_EACH_CALL_EXPR_ARG (arg
, iter
, exp
)
14707 if (arg
== error_mark_node
|| nopnds
>= MAX_HTM_OPERANDS
)
14710 insn_op
= &insn_data
[icode
].operand
[nopnds
];
14712 op
[nopnds
] = expand_normal (arg
);
14714 if (!(*insn_op
->predicate
) (op
[nopnds
], insn_op
->mode
))
14716 if (!strcmp (insn_op
->constraint
, "n"))
14718 int arg_num
= (nonvoid
) ? nopnds
: nopnds
+ 1;
14719 if (!CONST_INT_P (op
[nopnds
]))
14720 error ("argument %d must be an unsigned literal", arg_num
);
14722 error ("argument %d is an unsigned literal that is "
14723 "out of range", arg_num
);
14726 op
[nopnds
] = copy_to_mode_reg (insn_op
->mode
, op
[nopnds
]);
14732 /* Handle the builtins for extended mnemonics. These accept
14733 no arguments, but map to builtins that take arguments. */
14736 case HTM_BUILTIN_TENDALL
: /* Alias for: tend. 1 */
14737 case HTM_BUILTIN_TRESUME
: /* Alias for: tsr. 1 */
14738 op
[nopnds
++] = GEN_INT (1);
14740 attr
|= RS6000_BTC_UNARY
;
14742 case HTM_BUILTIN_TSUSPEND
: /* Alias for: tsr. 0 */
14743 op
[nopnds
++] = GEN_INT (0);
14745 attr
|= RS6000_BTC_UNARY
;
14751 /* If this builtin accesses SPRs, then pass in the appropriate
14752 SPR number and SPR regno as the last two operands. */
14755 machine_mode mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
14756 op
[nopnds
++] = gen_rtx_CONST_INT (mode
, htm_spr_num (fcode
));
14757 op
[nopnds
++] = gen_rtx_REG (mode
, htm_spr_regno (fcode
));
14759 /* If this builtin accesses a CR, then pass in a scratch
14760 CR as the last operand. */
14761 else if (attr
& RS6000_BTC_CR
)
14762 { cr
= gen_reg_rtx (CCmode
);
14768 int expected_nopnds
= 0;
14769 if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_UNARY
)
14770 expected_nopnds
= 1;
14771 else if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_BINARY
)
14772 expected_nopnds
= 2;
14773 else if ((attr
& RS6000_BTC_TYPE_MASK
) == RS6000_BTC_TERNARY
)
14774 expected_nopnds
= 3;
14775 if (!(attr
& RS6000_BTC_VOID
))
14776 expected_nopnds
+= 1;
14778 expected_nopnds
+= 2;
14780 gcc_assert (nopnds
== expected_nopnds
14781 && nopnds
<= MAX_HTM_OPERANDS
);
14787 pat
= GEN_FCN (icode
) (op
[0]);
14790 pat
= GEN_FCN (icode
) (op
[0], op
[1]);
14793 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2]);
14796 pat
= GEN_FCN (icode
) (op
[0], op
[1], op
[2], op
[3]);
14799 gcc_unreachable ();
14805 if (attr
& RS6000_BTC_CR
)
14807 if (fcode
== HTM_BUILTIN_TBEGIN
)
14809 /* Emit code to set TARGET to true or false depending on
14810 whether the tbegin. instruction successfully or failed
14811 to start a transaction. We do this by placing the 1's
14812 complement of CR's EQ bit into TARGET. */
14813 rtx scratch
= gen_reg_rtx (SImode
);
14814 emit_insn (gen_rtx_SET (scratch
,
14815 gen_rtx_EQ (SImode
, cr
,
14817 emit_insn (gen_rtx_SET (target
,
14818 gen_rtx_XOR (SImode
, scratch
,
14823 /* Emit code to copy the 4-bit condition register field
14824 CR into the least significant end of register TARGET. */
14825 rtx scratch1
= gen_reg_rtx (SImode
);
14826 rtx scratch2
= gen_reg_rtx (SImode
);
14827 rtx subreg
= simplify_gen_subreg (CCmode
, scratch1
, SImode
, 0);
14828 emit_insn (gen_movcc (subreg
, cr
));
14829 emit_insn (gen_lshrsi3 (scratch2
, scratch1
, GEN_INT (28)));
14830 emit_insn (gen_andsi3 (target
, scratch2
, GEN_INT (0xf)));
14839 *expandedp
= false;
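/* User-level sketch of the tbegin. handling above, assuming the documented
   __builtin_tbegin/__builtin_tend interface (an illustration, not a test
   case):

     if (__builtin_tbegin (0))	    // nonzero when the transaction started;
       {			    // the complement of the EQ bit of the CR
	 ... transactional body ... // field set by tbegin., as computed above
	 __builtin_tend (0);
       }
     else
       ... fallback path ...
*/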
14843 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14846 cpu_expand_builtin (enum rs6000_builtins fcode
, tree exp ATTRIBUTE_UNUSED
,
14849 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14850 if (fcode
== RS6000_BUILTIN_CPU_INIT
)
14853 if (target
== 0 || GET_MODE (target
) != SImode
)
14854 target
= gen_reg_rtx (SImode
);
14856 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14857 tree arg
= TREE_OPERAND (CALL_EXPR_ARG (exp
, 0), 0);
14858 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
14859 to a STRING_CST. */
14860 if (TREE_CODE (arg
) == ARRAY_REF
14861 && TREE_CODE (TREE_OPERAND (arg
, 0)) == STRING_CST
14862 && TREE_CODE (TREE_OPERAND (arg
, 1)) == INTEGER_CST
14863 && compare_tree_int (TREE_OPERAND (arg
, 1), 0) == 0)
14864 arg
= TREE_OPERAND (arg
, 0);
14866 if (TREE_CODE (arg
) != STRING_CST
)
14868 error ("builtin %qs only accepts a string argument",
14869 rs6000_builtin_info
[(size_t) fcode
].name
);
14873 if (fcode
== RS6000_BUILTIN_CPU_IS
)
14875 const char *cpu
= TREE_STRING_POINTER (arg
);
14876 rtx cpuid
= NULL_RTX
;
14877 for (size_t i
= 0; i
< ARRAY_SIZE (cpu_is_info
); i
++)
14878 if (strcmp (cpu
, cpu_is_info
[i
].cpu
) == 0)
14880 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14881 cpuid
= GEN_INT (cpu_is_info
[i
].cpuid
+ _DL_FIRST_PLATFORM
);
14884 if (cpuid
== NULL_RTX
)
14886 /* Invalid CPU argument. */
14887 error ("cpu %qs is an invalid argument to builtin %qs",
14888 cpu
, rs6000_builtin_info
[(size_t) fcode
].name
);
14892 rtx platform
= gen_reg_rtx (SImode
);
14893 rtx tcbmem
= gen_const_mem (SImode
,
14894 gen_rtx_PLUS (Pmode
,
14895 gen_rtx_REG (Pmode
, TLS_REGNUM
),
14896 GEN_INT (TCB_PLATFORM_OFFSET
)));
14897 emit_move_insn (platform
, tcbmem
);
14898 emit_insn (gen_eqsi3 (target
, platform
, cpuid
));
14900 else if (fcode
== RS6000_BUILTIN_CPU_SUPPORTS
)
14902 const char *hwcap
= TREE_STRING_POINTER (arg
);
14903 rtx mask
= NULL_RTX
;
14905 for (size_t i
= 0; i
< ARRAY_SIZE (cpu_supports_info
); i
++)
14906 if (strcmp (hwcap
, cpu_supports_info
[i
].hwcap
) == 0)
14908 mask
= GEN_INT (cpu_supports_info
[i
].mask
);
14909 hwcap_offset
= TCB_HWCAP_OFFSET (cpu_supports_info
[i
].id
);
14912 if (mask
== NULL_RTX
)
14914 /* Invalid HWCAP argument. */
14915 error ("%s %qs is an invalid argument to builtin %qs",
14916 "hwcap", hwcap
, rs6000_builtin_info
[(size_t) fcode
].name
);
14920 rtx tcb_hwcap
= gen_reg_rtx (SImode
);
14921 rtx tcbmem
= gen_const_mem (SImode
,
14922 gen_rtx_PLUS (Pmode
,
14923 gen_rtx_REG (Pmode
, TLS_REGNUM
),
14924 GEN_INT (hwcap_offset
)));
14925 emit_move_insn (tcb_hwcap
, tcbmem
);
14926 rtx scratch1
= gen_reg_rtx (SImode
);
14927 emit_insn (gen_rtx_SET (scratch1
, gen_rtx_AND (SImode
, tcb_hwcap
, mask
)));
14928 rtx scratch2
= gen_reg_rtx (SImode
);
14929 emit_insn (gen_eqsi3 (scratch2
, scratch1
, const0_rtx
));
14930 emit_insn (gen_rtx_SET (target
, gen_rtx_XOR (SImode
, scratch2
, const1_rtx
)));
14933 gcc_unreachable ();
14935 /* Record that we have expanded a CPU builtin, so that we can later
14936 emit a reference to the special symbol exported by LIBC to ensure we
14937 do not link against an old LIBC that doesn't support this feature. */
14938 cpu_builtin_p
= true;
14941 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14942 "capability bits", rs6000_builtin_info
[(size_t) fcode
].name
);
14944 /* For old LIBCs, always return FALSE. */
14945 emit_move_insn (target
, GEN_INT (0));
14946 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
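/* User-level sketch of the builtins expanded above (the argument strings are
   ones documented for the PowerPC __builtin_cpu_* interface; availability
   depends on a glibc new enough to export the platform/hwcap words in the
   TCB, as the warning above says):

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))
       ... CPU-specific path ...
     if (__builtin_cpu_supports ("arch_3_00"))
       ... ISA-3.0 path ...

   Each test becomes a single SImode load from the TCB followed by a compare
   or mask, as emitted above.  */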
14952 rs6000_expand_ternop_builtin (enum insn_code icode
, tree exp
, rtx target
)
14955 tree arg0
= CALL_EXPR_ARG (exp
, 0);
14956 tree arg1
= CALL_EXPR_ARG (exp
, 1);
14957 tree arg2
= CALL_EXPR_ARG (exp
, 2);
14958 rtx op0
= expand_normal (arg0
);
14959 rtx op1
= expand_normal (arg1
);
14960 rtx op2
= expand_normal (arg2
);
14961 machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
14962 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
14963 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
14964 machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
14966 if (icode
== CODE_FOR_nothing
)
14967 /* Builtin not supported on this processor. */
14970 /* If we got invalid arguments bail out before generating bad rtl. */
14971 if (arg0
== error_mark_node
14972 || arg1
== error_mark_node
14973 || arg2
== error_mark_node
)
14976 /* Check and prepare argument depending on the instruction code.
14978 Note that a switch statement instead of the sequence of tests
14979 would be incorrect as many of the CODE_FOR values could be
14980 CODE_FOR_nothing and that would yield multiple alternatives
14981 with identical values. We'd never reach here at runtime in
14983 if (icode
== CODE_FOR_altivec_vsldoi_v4sf
14984 || icode
== CODE_FOR_altivec_vsldoi_v2df
14985 || icode
== CODE_FOR_altivec_vsldoi_v4si
14986 || icode
== CODE_FOR_altivec_vsldoi_v8hi
14987 || icode
== CODE_FOR_altivec_vsldoi_v16qi
)
14989 /* Only allow 4-bit unsigned literals. */
14991 if (TREE_CODE (arg2
) != INTEGER_CST
14992 || TREE_INT_CST_LOW (arg2
) & ~0xf)
14994 error ("argument 3 must be a 4-bit unsigned literal");
14995 return CONST0_RTX (tmode
);
14998 else if (icode
== CODE_FOR_vsx_xxpermdi_v2df
14999 || icode
== CODE_FOR_vsx_xxpermdi_v2di
15000 || icode
== CODE_FOR_vsx_xxpermdi_v2df_be
15001 || icode
== CODE_FOR_vsx_xxpermdi_v2di_be
15002 || icode
== CODE_FOR_vsx_xxpermdi_v1ti
15003 || icode
== CODE_FOR_vsx_xxpermdi_v4sf
15004 || icode
== CODE_FOR_vsx_xxpermdi_v4si
15005 || icode
== CODE_FOR_vsx_xxpermdi_v8hi
15006 || icode
== CODE_FOR_vsx_xxpermdi_v16qi
15007 || icode
== CODE_FOR_vsx_xxsldwi_v16qi
15008 || icode
== CODE_FOR_vsx_xxsldwi_v8hi
15009 || icode
== CODE_FOR_vsx_xxsldwi_v4si
15010 || icode
== CODE_FOR_vsx_xxsldwi_v4sf
15011 || icode
== CODE_FOR_vsx_xxsldwi_v2di
15012 || icode
== CODE_FOR_vsx_xxsldwi_v2df
)
15014 /* Only allow 2-bit unsigned literals. */
15016 if (TREE_CODE (arg2
) != INTEGER_CST
15017 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15019 error ("argument 3 must be a 2-bit unsigned literal");
15020 return CONST0_RTX (tmode
);
15023 else if (icode
== CODE_FOR_vsx_set_v2df
15024 || icode
== CODE_FOR_vsx_set_v2di
15025 || icode
== CODE_FOR_bcdadd
15026 || icode
== CODE_FOR_bcdadd_lt
15027 || icode
== CODE_FOR_bcdadd_eq
15028 || icode
== CODE_FOR_bcdadd_gt
15029 || icode
== CODE_FOR_bcdsub
15030 || icode
== CODE_FOR_bcdsub_lt
15031 || icode
== CODE_FOR_bcdsub_eq
15032 || icode
== CODE_FOR_bcdsub_gt
)
15034 /* Only allow 1-bit unsigned literals. */
15036 if (TREE_CODE (arg2
) != INTEGER_CST
15037 || TREE_INT_CST_LOW (arg2
) & ~0x1)
15039 error ("argument 3 must be a 1-bit unsigned literal");
15040 return CONST0_RTX (tmode
);
15043 else if (icode
== CODE_FOR_dfp_ddedpd_dd
15044 || icode
== CODE_FOR_dfp_ddedpd_td
)
15046 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15048 if (TREE_CODE (arg0
) != INTEGER_CST
15049 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15051 error ("argument 1 must be 0 or 2");
15052 return CONST0_RTX (tmode
);
15055 else if (icode
== CODE_FOR_dfp_denbcd_dd
15056 || icode
== CODE_FOR_dfp_denbcd_td
)
15058 /* Only allow 1-bit unsigned literals. */
15060 if (TREE_CODE (arg0
) != INTEGER_CST
15061 || TREE_INT_CST_LOW (arg0
) & ~0x1)
15063 error ("argument 1 must be a 1-bit unsigned literal");
15064 return CONST0_RTX (tmode
);
15067 else if (icode
== CODE_FOR_dfp_dscli_dd
15068 || icode
== CODE_FOR_dfp_dscli_td
15069 || icode
== CODE_FOR_dfp_dscri_dd
15070 || icode
== CODE_FOR_dfp_dscri_td
)
15072 /* Only allow 6-bit unsigned literals. */
15074 if (TREE_CODE (arg1
) != INTEGER_CST
15075 || TREE_INT_CST_LOW (arg1
) & ~0x3f)
15077 error ("argument 2 must be a 6-bit unsigned literal");
15078 return CONST0_RTX (tmode
);
15081 else if (icode
== CODE_FOR_crypto_vshasigmaw
15082 || icode
== CODE_FOR_crypto_vshasigmad
)
15084 /* Check whether the 2nd and 3rd arguments are integer constants and in
15085 range and prepare arguments. */
15087 if (TREE_CODE (arg1
) != INTEGER_CST
|| wi::geu_p (arg1
, 2))
15089 error ("argument 2 must be 0 or 1");
15090 return CONST0_RTX (tmode
);
15094 if (TREE_CODE (arg2
) != INTEGER_CST
|| wi::geu_p (arg2
, 16))
15096 error ("argument 3 must be in the range 0..15");
15097 return CONST0_RTX (tmode
);
15102 || GET_MODE (target
) != tmode
15103 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15104 target
= gen_reg_rtx (tmode
);
15106 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
15107 op0
= copy_to_mode_reg (mode0
, op0
);
15108 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
15109 op1
= copy_to_mode_reg (mode1
, op1
);
15110 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
15111 op2
= copy_to_mode_reg (mode2
, op2
);
15113 if (TARGET_PAIRED_FLOAT
&& icode
== CODE_FOR_selv2sf4
)
15114 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
, CONST0_RTX (SFmode
));
15116 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
15124 /* Expand the lvx builtins. */
15126 altivec_expand_ld_builtin (tree exp
, rtx target
, bool *expandedp
)
15128 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15129 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
15131 machine_mode tmode
, mode0
;
15133 enum insn_code icode
;
15137 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi
:
15138 icode
= CODE_FOR_vector_altivec_load_v16qi
;
15140 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi
:
15141 icode
= CODE_FOR_vector_altivec_load_v8hi
;
15143 case ALTIVEC_BUILTIN_LD_INTERNAL_4si
:
15144 icode
= CODE_FOR_vector_altivec_load_v4si
;
15146 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf
:
15147 icode
= CODE_FOR_vector_altivec_load_v4sf
;
15149 case ALTIVEC_BUILTIN_LD_INTERNAL_2df
:
15150 icode
= CODE_FOR_vector_altivec_load_v2df
;
15152 case ALTIVEC_BUILTIN_LD_INTERNAL_2di
:
15153 icode
= CODE_FOR_vector_altivec_load_v2di
;
15155 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti
:
15156 icode
= CODE_FOR_vector_altivec_load_v1ti
;
15159 *expandedp
= false;
15165 arg0
= CALL_EXPR_ARG (exp
, 0);
15166 op0
= expand_normal (arg0
);
15167 tmode
= insn_data
[icode
].operand
[0].mode
;
15168 mode0
= insn_data
[icode
].operand
[1].mode
;
15171 || GET_MODE (target
) != tmode
15172 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15173 target
= gen_reg_rtx (tmode
);
15175 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
15176 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
15178 pat
= GEN_FCN (icode
) (target
, op0
);
15185 /* Expand the stvx builtins. */
15187 altivec_expand_st_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
15190 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15191 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
15193 machine_mode mode0
, mode1
;
15195 enum insn_code icode
;
15199 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi
:
15200 icode
= CODE_FOR_vector_altivec_store_v16qi
;
15202 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi
:
15203 icode
= CODE_FOR_vector_altivec_store_v8hi
;
15205 case ALTIVEC_BUILTIN_ST_INTERNAL_4si
:
15206 icode
= CODE_FOR_vector_altivec_store_v4si
;
15208 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf
:
15209 icode
= CODE_FOR_vector_altivec_store_v4sf
;
15211 case ALTIVEC_BUILTIN_ST_INTERNAL_2df
:
15212 icode
= CODE_FOR_vector_altivec_store_v2df
;
15214 case ALTIVEC_BUILTIN_ST_INTERNAL_2di
:
15215 icode
= CODE_FOR_vector_altivec_store_v2di
;
15217 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti
:
15218 icode
= CODE_FOR_vector_altivec_store_v1ti
;
15221 *expandedp
= false;
15225 arg0
= CALL_EXPR_ARG (exp
, 0);
15226 arg1
= CALL_EXPR_ARG (exp
, 1);
15227 op0
= expand_normal (arg0
);
15228 op1
= expand_normal (arg1
);
15229 mode0
= insn_data
[icode
].operand
[0].mode
;
15230 mode1
= insn_data
[icode
].operand
[1].mode
;
15232 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15233 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
15234 if (! (*insn_data
[icode
].operand
[1].predicate
) (op1
, mode1
))
15235 op1
= copy_to_mode_reg (mode1
, op1
);
15237 pat
= GEN_FCN (icode
) (op0
, op1
);
15245 /* Expand the dst builtins. */
15247 altivec_expand_dst_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
15250 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15251 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15252 tree arg0
, arg1
, arg2
;
15253 machine_mode mode0
, mode1
;
15254 rtx pat
, op0
, op1
, op2
;
15255 const struct builtin_description
*d
;
15258 *expandedp
= false;
15260 /* Handle DST variants. */
15262 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
15263 if (d
->code
== fcode
)
15265 arg0
= CALL_EXPR_ARG (exp
, 0);
15266 arg1
= CALL_EXPR_ARG (exp
, 1);
15267 arg2
= CALL_EXPR_ARG (exp
, 2);
15268 op0
= expand_normal (arg0
);
15269 op1
= expand_normal (arg1
);
15270 op2
= expand_normal (arg2
);
15271 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
15272 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
15274 /* Invalid arguments, bail out before generating bad rtl. */
15275 if (arg0
== error_mark_node
15276 || arg1
== error_mark_node
15277 || arg2
== error_mark_node
)
15282 if (TREE_CODE (arg2
) != INTEGER_CST
15283 || TREE_INT_CST_LOW (arg2
) & ~0x3)
15285 error ("argument to %qs must be a 2-bit unsigned literal", d
->name
);
15289 if (! (*insn_data
[d
->icode
].operand
[0].predicate
) (op0
, mode0
))
15290 op0
= copy_to_mode_reg (Pmode
, op0
);
15291 if (! (*insn_data
[d
->icode
].operand
[1].predicate
) (op1
, mode1
))
15292 op1
= copy_to_mode_reg (mode1
, op1
);
15294 pat
= GEN_FCN (d
->icode
) (op0
, op1
, op2
);
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  machine_mode tmode = TYPE_MODE (type);
  machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */
static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
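/* Example of the range check above: for a 4-element vector type
   (TYPE_VECTOR_SUBPARTS == 4) the valid selectors are 0..3, so a constant
   selector of 4 passed to the vec_set/vec_ext expanders below triggers
   "selector must be an integer constant in the range 0..3".  */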
15359 /* Expand vec_set builtin. */
15361 altivec_expand_vec_set_builtin (tree exp
)
15363 machine_mode tmode
, mode1
;
15364 tree arg0
, arg1
, arg2
;
15368 arg0
= CALL_EXPR_ARG (exp
, 0);
15369 arg1
= CALL_EXPR_ARG (exp
, 1);
15370 arg2
= CALL_EXPR_ARG (exp
, 2);
15372 tmode
= TYPE_MODE (TREE_TYPE (arg0
));
15373 mode1
= TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0
)));
15374 gcc_assert (VECTOR_MODE_P (tmode
));
15376 op0
= expand_expr (arg0
, NULL_RTX
, tmode
, EXPAND_NORMAL
);
15377 op1
= expand_expr (arg1
, NULL_RTX
, mode1
, EXPAND_NORMAL
);
15378 elt
= get_element_number (TREE_TYPE (arg0
), arg2
);
15380 if (GET_MODE (op1
) != mode1
&& GET_MODE (op1
) != VOIDmode
)
15381 op1
= convert_modes (mode1
, GET_MODE (op1
), op1
, true);
15383 op0
= force_reg (tmode
, op0
);
15384 op1
= force_reg (mode1
, op1
);
15386 rs6000_expand_vector_set (op0
, op1
, elt
);
15391 /* Expand vec_ext builtin. */
15393 altivec_expand_vec_ext_builtin (tree exp
, rtx target
)
15395 machine_mode tmode
, mode0
;
15400 arg0
= CALL_EXPR_ARG (exp
, 0);
15401 arg1
= CALL_EXPR_ARG (exp
, 1);
15403 op0
= expand_normal (arg0
);
15404 op1
= expand_normal (arg1
);
15406 /* Call get_element_number to validate arg1 if it is a constant. */
15407 if (TREE_CODE (arg1
) == INTEGER_CST
)
15408 (void) get_element_number (TREE_TYPE (arg0
), arg1
);
15410 tmode
= TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0
)));
15411 mode0
= TYPE_MODE (TREE_TYPE (arg0
));
15412 gcc_assert (VECTOR_MODE_P (mode0
));
15414 op0
= force_reg (mode0
, op0
);
15416 if (optimize
|| !target
|| !register_operand (target
, tmode
))
15417 target
= gen_reg_rtx (tmode
);
15419 rs6000_expand_vector_extract (target
, op0
, op1
);
15424 /* Expand the builtin in EXP and store the result in TARGET. Store
15425 true in *EXPANDEDP if we found a builtin to expand. */
15427 altivec_expand_builtin (tree exp
, rtx target
, bool *expandedp
)
15429 const struct builtin_description
*d
;
15431 enum insn_code icode
;
15432 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
15433 tree arg0
, arg1
, arg2
;
15435 machine_mode tmode
, mode0
;
15436 enum rs6000_builtins fcode
15437 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15439 if (rs6000_overloaded_builtin_p (fcode
))
15442 error ("unresolved overload for Altivec builtin %qF", fndecl
);
15444 /* Given it is invalid, just generate a normal call. */
15445 return expand_call (exp
, target
, false);
15448 target
= altivec_expand_ld_builtin (exp
, target
, expandedp
);
15452 target
= altivec_expand_st_builtin (exp
, target
, expandedp
);
15456 target
= altivec_expand_dst_builtin (exp
, target
, expandedp
);
15464 case ALTIVEC_BUILTIN_STVX_V2DF
:
15465 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op
, exp
);
15466 case ALTIVEC_BUILTIN_STVX_V2DI
:
15467 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op
, exp
);
15468 case ALTIVEC_BUILTIN_STVX_V4SF
:
15469 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op
, exp
);
15470 case ALTIVEC_BUILTIN_STVX
:
15471 case ALTIVEC_BUILTIN_STVX_V4SI
:
15472 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op
, exp
);
15473 case ALTIVEC_BUILTIN_STVX_V8HI
:
15474 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op
, exp
);
15475 case ALTIVEC_BUILTIN_STVX_V16QI
:
15476 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op
, exp
);
15477 case ALTIVEC_BUILTIN_STVEBX
:
15478 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx
, exp
);
15479 case ALTIVEC_BUILTIN_STVEHX
:
15480 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx
, exp
);
15481 case ALTIVEC_BUILTIN_STVEWX
:
15482 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx
, exp
);
15483 case ALTIVEC_BUILTIN_STVXL_V2DF
:
15484 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df
, exp
);
15485 case ALTIVEC_BUILTIN_STVXL_V2DI
:
15486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di
, exp
);
15487 case ALTIVEC_BUILTIN_STVXL_V4SF
:
15488 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf
, exp
);
15489 case ALTIVEC_BUILTIN_STVXL
:
15490 case ALTIVEC_BUILTIN_STVXL_V4SI
:
15491 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si
, exp
);
15492 case ALTIVEC_BUILTIN_STVXL_V8HI
:
15493 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi
, exp
);
15494 case ALTIVEC_BUILTIN_STVXL_V16QI
:
15495 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi
, exp
);
15497 case ALTIVEC_BUILTIN_STVLX
:
15498 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx
, exp
);
15499 case ALTIVEC_BUILTIN_STVLXL
:
15500 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl
, exp
);
15501 case ALTIVEC_BUILTIN_STVRX
:
15502 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx
, exp
);
15503 case ALTIVEC_BUILTIN_STVRXL
:
15504 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl
, exp
);
15506 case P9V_BUILTIN_STXVL
:
15507 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl
, exp
);
15509 case VSX_BUILTIN_STXVD2X_V1TI
:
15510 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti
, exp
);
15511 case VSX_BUILTIN_STXVD2X_V2DF
:
15512 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df
, exp
);
15513 case VSX_BUILTIN_STXVD2X_V2DI
:
15514 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di
, exp
);
15515 case VSX_BUILTIN_STXVW4X_V4SF
:
15516 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf
, exp
);
15517 case VSX_BUILTIN_STXVW4X_V4SI
:
15518 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si
, exp
);
15519 case VSX_BUILTIN_STXVW4X_V8HI
:
15520 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi
, exp
);
15521 case VSX_BUILTIN_STXVW4X_V16QI
:
15522 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi
, exp
);
15524 /* For the following on big endian, it's ok to use any appropriate
15525 unaligned-supporting store, so use a generic expander. For
15526 little-endian, the exact element-reversing instruction must
15528 case VSX_BUILTIN_ST_ELEMREV_V2DF
:
15530 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2df
15531 : CODE_FOR_vsx_st_elemrev_v2df
);
15532 return altivec_expand_stv_builtin (code
, exp
);
15534 case VSX_BUILTIN_ST_ELEMREV_V2DI
:
15536 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2di
15537 : CODE_FOR_vsx_st_elemrev_v2di
);
15538 return altivec_expand_stv_builtin (code
, exp
);
15540 case VSX_BUILTIN_ST_ELEMREV_V4SF
:
15542 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4sf
15543 : CODE_FOR_vsx_st_elemrev_v4sf
);
15544 return altivec_expand_stv_builtin (code
, exp
);
15546 case VSX_BUILTIN_ST_ELEMREV_V4SI
:
15548 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4si
15549 : CODE_FOR_vsx_st_elemrev_v4si
);
15550 return altivec_expand_stv_builtin (code
, exp
);
15552 case VSX_BUILTIN_ST_ELEMREV_V8HI
:
15554 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v8hi
15555 : CODE_FOR_vsx_st_elemrev_v8hi
);
15556 return altivec_expand_stv_builtin (code
, exp
);
15558 case VSX_BUILTIN_ST_ELEMREV_V16QI
:
15560 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v16qi
15561 : CODE_FOR_vsx_st_elemrev_v16qi
);
15562 return altivec_expand_stv_builtin (code
, exp
);
15565 case ALTIVEC_BUILTIN_MFVSCR
:
15566 icode
= CODE_FOR_altivec_mfvscr
;
15567 tmode
= insn_data
[icode
].operand
[0].mode
;
15570 || GET_MODE (target
) != tmode
15571 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15572 target
= gen_reg_rtx (tmode
);
15574 pat
= GEN_FCN (icode
) (target
);
15580 case ALTIVEC_BUILTIN_MTVSCR
:
15581 icode
= CODE_FOR_altivec_mtvscr
;
15582 arg0
= CALL_EXPR_ARG (exp
, 0);
15583 op0
= expand_normal (arg0
);
15584 mode0
= insn_data
[icode
].operand
[0].mode
;
15586 /* If we got invalid arguments bail out before generating bad rtl. */
15587 if (arg0
== error_mark_node
)
15590 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15591 op0
= copy_to_mode_reg (mode0
, op0
);
15593 pat
= GEN_FCN (icode
) (op0
);
15598 case ALTIVEC_BUILTIN_DSSALL
:
15599 emit_insn (gen_altivec_dssall ());
15602 case ALTIVEC_BUILTIN_DSS
:
15603 icode
= CODE_FOR_altivec_dss
;
15604 arg0
= CALL_EXPR_ARG (exp
, 0);
15606 op0
= expand_normal (arg0
);
15607 mode0
= insn_data
[icode
].operand
[0].mode
;
15609 /* If we got invalid arguments bail out before generating bad rtl. */
15610 if (arg0
== error_mark_node
)
15613 if (TREE_CODE (arg0
) != INTEGER_CST
15614 || TREE_INT_CST_LOW (arg0
) & ~0x3)
15616 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15620 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15621 op0
= copy_to_mode_reg (mode0
, op0
);
15623 emit_insn (gen_altivec_dss (op0
));
15626 case ALTIVEC_BUILTIN_VEC_INIT_V4SI
:
15627 case ALTIVEC_BUILTIN_VEC_INIT_V8HI
:
15628 case ALTIVEC_BUILTIN_VEC_INIT_V16QI
:
15629 case ALTIVEC_BUILTIN_VEC_INIT_V4SF
:
15630 case VSX_BUILTIN_VEC_INIT_V2DF
:
15631 case VSX_BUILTIN_VEC_INIT_V2DI
:
15632 case VSX_BUILTIN_VEC_INIT_V1TI
:
15633 return altivec_expand_vec_init_builtin (TREE_TYPE (exp
), exp
, target
);
15635 case ALTIVEC_BUILTIN_VEC_SET_V4SI
:
15636 case ALTIVEC_BUILTIN_VEC_SET_V8HI
:
15637 case ALTIVEC_BUILTIN_VEC_SET_V16QI
:
15638 case ALTIVEC_BUILTIN_VEC_SET_V4SF
:
15639 case VSX_BUILTIN_VEC_SET_V2DF
:
15640 case VSX_BUILTIN_VEC_SET_V2DI
:
15641 case VSX_BUILTIN_VEC_SET_V1TI
:
15642 return altivec_expand_vec_set_builtin (exp
);
15644 case ALTIVEC_BUILTIN_VEC_EXT_V4SI
:
15645 case ALTIVEC_BUILTIN_VEC_EXT_V8HI
:
15646 case ALTIVEC_BUILTIN_VEC_EXT_V16QI
:
15647 case ALTIVEC_BUILTIN_VEC_EXT_V4SF
:
15648 case VSX_BUILTIN_VEC_EXT_V2DF
:
15649 case VSX_BUILTIN_VEC_EXT_V2DI
:
15650 case VSX_BUILTIN_VEC_EXT_V1TI
:
15651 return altivec_expand_vec_ext_builtin (exp
, target
);
15653 case P9V_BUILTIN_VEXTRACT4B
:
15654 case P9V_BUILTIN_VEC_VEXTRACT4B
:
15655 arg1
= CALL_EXPR_ARG (exp
, 1);
15658 /* Generate a normal call if it is invalid. */
15659 if (arg1
== error_mark_node
)
15660 return expand_call (exp
, target
, false);
15662 if (TREE_CODE (arg1
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg1
) > 12)
15664 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15665 return expand_call (exp
, target
, false);
15669 case P9V_BUILTIN_VINSERT4B
:
15670 case P9V_BUILTIN_VINSERT4B_DI
:
15671 case P9V_BUILTIN_VEC_VINSERT4B
:
15672 arg2
= CALL_EXPR_ARG (exp
, 2);
15675 /* Generate a normal call if it is invalid. */
15676 if (arg2
== error_mark_node
)
15677 return expand_call (exp
, target
, false);
15679 if (TREE_CODE (arg2
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2
) > 12)
15681 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15682 return expand_call (exp
, target
, false);
15688 /* Fall through. */
15691 /* Expand abs* operations. */
15693 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
15694 if (d
->code
== fcode
)
15695 return altivec_expand_abs_builtin (d
->icode
, exp
, target
);
15697 /* Expand the AltiVec predicates. */
15698 d
= bdesc_altivec_preds
;
15699 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
15700 if (d
->code
== fcode
)
15701 return altivec_expand_predicate_builtin (d
->icode
, exp
, target
);
15703 /* LV* are funky. We initialized them differently. */
15706 case ALTIVEC_BUILTIN_LVSL
:
15707 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl
,
15708 exp
, target
, false);
15709 case ALTIVEC_BUILTIN_LVSR
:
15710 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr
,
15711 exp
, target
, false);
15712 case ALTIVEC_BUILTIN_LVEBX
:
15713 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx
,
15714 exp
, target
, false);
15715 case ALTIVEC_BUILTIN_LVEHX
:
15716 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx
,
15717 exp
, target
, false);
15718 case ALTIVEC_BUILTIN_LVEWX
:
15719 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx
,
15720 exp
, target
, false);
15721 case ALTIVEC_BUILTIN_LVXL_V2DF
:
15722 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df
,
15723 exp
, target
, false);
15724 case ALTIVEC_BUILTIN_LVXL_V2DI
:
15725 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di
,
15726 exp
, target
, false);
15727 case ALTIVEC_BUILTIN_LVXL_V4SF
:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
    case ALTIVEC_BUILTIN_LVXL_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op, exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
    case ALTIVEC_BUILTIN_LVX_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op, exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx, exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl, exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx, exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl, exp, target, true);
    case VSX_BUILTIN_LXVD2X_V1TI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti, exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df, exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi, exp, target, false);

      /* For the following on big endian, it's ok to use any appropriate
	 unaligned-supporting load, so use a generic expander.  For
	 little-endian, the exact element-reversing instruction must
	 be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
			       : CODE_FOR_vsx_ld_elemrev_v2df);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
			       : CODE_FOR_vsx_ld_elemrev_v2di);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
			       : CODE_FOR_vsx_ld_elemrev_v4sf);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
			       : CODE_FOR_vsx_ld_elemrev_v4si);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
			       : CODE_FOR_vsx_ld_elemrev_v8hi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
			       : CODE_FOR_vsx_ld_elemrev_v16qi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    default:
      break;
      /* Fall through.  */
    }

  /* XL_BE  We initialized them to always load in big endian order.  */
  switch (fcode)
    {
    case VSX_BUILTIN_XL_BE_V2DI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v2di;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V4SI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v4si;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V8HI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v8hi;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V16QI:
      {
	enum insn_code code = CODE_FOR_vsx_load_v16qi;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V2DF:
      {
	enum insn_code code = CODE_FOR_vsx_load_v2df;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_XL_BE_V4SF:
      {
	enum insn_code code = CODE_FOR_vsx_load_v4sf;
	return altivec_expand_xl_be_builtin (code, exp, target, false);
      }
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
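/* Illustrative note (not from the original sources): the LD_ELEMREV
   expansion above is what makes a user-level call such as

     vector double vd = vec_xl (0, ptr);   // assuming altivec.h is included

   pick a plain unaligned-supporting VSX load on big-endian targets but
   an element-reversing load sequence on little-endian targets, so the
   in-register element order matches the memory order either way.  */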
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */

static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of %s must be a constant",
	     "__builtin_paired_predicate");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of %qs is out of range",
	     "__builtin_paired_predicate");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
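/* Illustrative sketch (assumption, not taken from the original sources):
   the first argument of a paired predicate selects which CR bit of the
   CCFPmode compare is tested, e.g.

     int r = __builtin_paired_cmpu0 (0, a, b);   // form 0 selects the LT bit

   which expands to the compare above followed by a SImode extraction of
   the selected bit into TARGET.  */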
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mpaired");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
	   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
	   == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-float", "-mlong-double-128");
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else
    error ("builtin function %qs is not supported with the current options",
	   name);
}
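/* Example of the resulting diagnostic (illustrative, not from the original
   sources): compiling

     vector double d = __builtin_vsx_xvadddp (a, b);   // without -mvsx

   reports "builtin function '__builtin_vsx_xvadddp' requires the '-mvsx'
   option" via the RS6000_BTM_VSX branch above.  */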
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
		     tree *args, bool ignore ATTRIBUTE_UNUSED)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
    {
      enum rs6000_builtins fn_code
	= (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
      switch (fn_code)
	{
	case RS6000_BUILTIN_NANQ:
	case RS6000_BUILTIN_NANSQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    const char *str = c_getstr (*args);
	    int quiet = fn_code == RS6000_BUILTIN_NANQ;
	    REAL_VALUE_TYPE real;

	    if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
	      return build_real (type, real);
	    return NULL_TREE;
	  }
	case RS6000_BUILTIN_INFQ:
	case RS6000_BUILTIN_HUGE_VALQ:
	  {
	    tree type = TREE_TYPE (TREE_TYPE (fndecl));
	    REAL_VALUE_TYPE inf;
	    real_inf (&inf);
	    return build_real (type, inf);
	  }
	default:
	  break;
	}
    }
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#else
  return NULL_TREE;
#endif
}
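/* Illustrative example (assumption about the user-visible effect): with a
   constant string argument,

     __float128 x = __builtin_nanq ("");      // quiet NaN
     __float128 y = __builtin_nansq ("1");    // signaling NaN, payload 1
     __float128 z = __builtin_infq ();        // +infinity

   the calls above are folded to REAL_CST constants here instead of
   surviving as library-style calls.  */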
/* Fold a machine-dependent built-in in GIMPLE.  (For folding into
   a constant, use rs6000_fold_builtin.)  */

bool
rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  tree fndecl = gimple_call_fndecl (stmt);
  gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
  enum rs6000_builtins fn_code
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, lhs;

  size_t uns_fncode = (size_t) fn_code;
  enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
  const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
  const char *fn_name2 = (icode != CODE_FOR_nothing)
			  ? get_insn_name ((int) icode)
			  : "nothing";

  if (TARGET_DEBUG_BUILTIN)
      fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
	       fn_code, fn_name1, fn_name2);

  if (!rs6000_fold_gimple)
    return false;

  /* Generic solution to prevent gimple folding of code without a LHS.  */
  if (!gimple_call_lhs (stmt))
    return false;

  switch (fn_code)
    {
    /* Flavors of vec_add.  We deliberately don't expand
       P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
       TImode, resulting in much poorer code generation.  */
    case ALTIVEC_BUILTIN_VADDUBM:
    case ALTIVEC_BUILTIN_VADDUHM:
    case ALTIVEC_BUILTIN_VADDUWM:
    case P8V_BUILTIN_VADDUDM:
    case ALTIVEC_BUILTIN_VADDFP:
    case VSX_BUILTIN_XVADDDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_sub.  We deliberately don't expand
       P8V_BUILTIN_VSUBUQM.  */
    case ALTIVEC_BUILTIN_VSUBUBM:
    case ALTIVEC_BUILTIN_VSUBUHM:
    case ALTIVEC_BUILTIN_VSUBUWM:
    case P8V_BUILTIN_VSUBUDM:
    case ALTIVEC_BUILTIN_VSUBFP:
    case VSX_BUILTIN_XVSUBDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    case VSX_BUILTIN_XVMULSP:
    case VSX_BUILTIN_XVMULDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Even element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULESB:
    case ALTIVEC_BUILTIN_VMULESH:
    /* Even element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Odd element flavors of vec_mul (signed).  */
    case ALTIVEC_BUILTIN_VMULOSB:
    case ALTIVEC_BUILTIN_VMULOSH:
    /* Odd element flavors of vec_mul (unsigned).  */
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_div (Integer).  */
    case VSX_BUILTIN_DIV_V2DI:
    case VSX_BUILTIN_UDIV_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_div (Float).  */
    case VSX_BUILTIN_XVDIVSP:
    case VSX_BUILTIN_XVDIVDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_and.  */
    case ALTIVEC_BUILTIN_VAND:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_andc.  */
    case ALTIVEC_BUILTIN_VANDC:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_nand.  */
    case P8V_BUILTIN_VEC_NAND:
    case P8V_BUILTIN_NAND_V16QI:
    case P8V_BUILTIN_NAND_V8HI:
    case P8V_BUILTIN_NAND_V4SI:
    case P8V_BUILTIN_NAND_V4SF:
    case P8V_BUILTIN_NAND_V2DF:
    case P8V_BUILTIN_NAND_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_or.  */
    case ALTIVEC_BUILTIN_VOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_orc.  */
    case P8V_BUILTIN_ORC_V16QI:
    case P8V_BUILTIN_ORC_V8HI:
    case P8V_BUILTIN_ORC_V4SI:
    case P8V_BUILTIN_ORC_V4SF:
    case P8V_BUILTIN_ORC_V2DF:
    case P8V_BUILTIN_ORC_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_xor.  */
    case ALTIVEC_BUILTIN_VXOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_nor.  */
    case ALTIVEC_BUILTIN_VNOR:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	    && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	  return false;
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
	gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_insert_before (gsi, g, GSI_SAME_STMT);
	g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vector shift right algebraic.
       vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vector shift left.
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	    && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	  return false;
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple_seq stmts = NULL;
	/* Convert arg0 to unsigned.  */
	tree arg0_unsigned
	  = gimple_build (&stmts, VIEW_CONVERT_EXPR,
			  unsigned_type_for (TREE_TYPE (arg0)), arg0);
	tree res
	  = gimple_build (&stmts, RSHIFT_EXPR,
			  TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
	/* Convert result back to the lhs type.  */
	res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	update_call_from_tree (gsi, res);
	return true;
      }
    default:
      break;
    }

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
	     fn_code, fn_name1, fn_name2);

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t) fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
			   ? get_insn_name ((int) icode)
			   : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int) icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
					   target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
					    ? CODE_FOR_rs6000_mftb_di
					    : CODE_FOR_rs6000_mftb_si),
					   target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
		     : (int) CODE_FOR_altivec_lvsl_direct);
	machine_mode tmode = insn_data[icode].operand[0].mode;
	machine_mode mode = insn_data[icode].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	pat = GEN_FCN (icode) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
	      || attr == RS6000_BTC_BINARY
	      || attr == RS6000_BTC_TERNARY
	      || attr == RS6000_BTC_SPECIAL);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (d->icode, target);

  gcc_unreachable ();
}
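/* Illustrative usage (assumption about user-level code, not from the
   original sources): the CPU builtins routed through cpu_expand_builtin
   above are typically used as

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))
       use_power9_path ();          // hypothetical user function
     if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();             // hypothetical user function
 */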
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
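/* Example (taken from the uses below): rs6000_vector_type
   ("__vector signed int", intSI_type_node, 4) returns a named variant of
   the canonical V4SI vector type, so diagnostics and debug info can refer
   to "__vector signed int" without renaming the canonical type itself.  */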
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
	     (TARGET_PAIRED_FLOAT) ? ", paired"  : "",
	     (TARGET_ALTIVEC)	   ? ", altivec" : "",
	     (TARGET_VSX)	   ? ", vsx"	 : "");

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
				       : "__vector long long",
				       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
				       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
				       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
					intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
						 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
						unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
						unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
						? "__vector unsigned long"
						: "__vector unsigned long long",
						unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
						TYPE_QUAL_CONST));

  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
					   intTI_type_node, 1);
      unsigned_V1TI_type_node
	= rs6000_vector_type ("__vector unsigned __int128",
			      unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;

  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM double-double
     format that uses a pair of doubles, depending on the switches and
     defaults.

     We do not enable the actual __float128 keyword unless the user explicitly
     asks for it, because the library support is not yet complete.

     If we don't support for either 128-bit IBM double double or IEEE 128-bit
     floating point, we need make sure the type is non-zero or else self-test
     fails during bootstrap.

     We don't register a built-in type for __ibm128 if the type is the same as
     long double.  Instead we add a #define for __ibm128 in
     rs6000_cpu_cpp_builtins to long double.  */
  if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
    {
      ibm128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ibm128_float_type_node, IFmode);
      layout_type (ibm128_float_type_node);

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
					      "__ibm128");
    }
  else
    ibm128_float_type_node = long_double_type_node;

  if (TARGET_FLOAT128_KEYWORD)
    {
      ieee128_float_type_node = float128_type_node;
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
					      "__float128");
    }

  else if (TARGET_FLOAT128_TYPE)
    {
      ieee128_float_type_node = make_node (REAL_TYPE);
      TYPE_PRECISION (ibm128_float_type_node) = 128;
      SET_TYPE_MODE (ieee128_float_type_node, KFmode);
      layout_type (ieee128_float_type_node);

      /* If we are not exporting the __float128/_Float128 keywords, we need a
	 keyword to get the types created.  Use __ieee128 as the dummy
	 keyword.  */
      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
					      "__ieee128");
    }

  else
    ieee128_float_type_node = long_double_type_node;

  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;

  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
					     bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
					    bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
					    bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
					    ? "__vector __bool long"
					    : "__vector __bool long long",
					    bool_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
					     pixel_type_node, 8);

  /* Paired builtins are only available if you build a compiler with the
     appropriate options, so only create those builtins with the appropriate
     compiler option.  Create Altivec and VSX builtins on machines with at
     least the general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = build_function_type_list (ieee128_float_type_node,
				    const_str_type_node, NULL_TREE);
  def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
  def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);

  ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
  def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
  def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
				      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
				      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
				    intSI_type_node, double_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);

  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF &&
      (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
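/* Illustrative usage of two builtins registered above (assumption about
   user-level code, not part of the original sources):

     unsigned long long tb = __builtin_ppc_get_timebase ();
     double fpscr_image = __builtin_mffs ();

   Both are available independently of the AltiVec/VSX builtin sets.  */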
/* Returns the rs6000 builtin decl for CODE.  */

tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins) code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
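/* Illustrative note (assumption): callers such as the vectorizer target
   hooks look builtins up with, e.g.,
   rs6000_builtin_decl (ALTIVEC_BUILTIN_MASK_FOR_LOAD, true) and must be
   prepared for error_mark_node when the builtin's mask is not satisfied
   by the current rs6000_builtin_mask.  */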
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
				integer_type_node,
				V2SF_type_node,
				V2SF_type_node,
				NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							    long_integer_type_node,
							    pcfloat_type_node,
							    NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
		     d->name);
	  continue;
	}

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);

      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
		 (int)i, get_insn_name (d->icode), (int)d->icode,
		 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
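/* Illustrative note (assumption): every paired predicate in
   bdesc_paired_preds compares two 2-element float vectors and takes the
   CR-bit selector as its first argument, which is why the only prototype
   registered above is int_ftype_int_v2sf_v2sf.  */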
17093 altivec_init_builtins (void)
17095 const struct builtin_description
*d
;
17099 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
17101 tree pvoid_type_node
= build_pointer_type (void_type_node
);
17103 tree pcvoid_type_node
17104 = build_pointer_type (build_qualified_type (void_type_node
,
17107 tree int_ftype_opaque
17108 = build_function_type_list (integer_type_node
,
17109 opaque_V4SI_type_node
, NULL_TREE
);
17110 tree opaque_ftype_opaque
17111 = build_function_type_list (integer_type_node
, NULL_TREE
);
17112 tree opaque_ftype_opaque_int
17113 = build_function_type_list (opaque_V4SI_type_node
,
17114 opaque_V4SI_type_node
, integer_type_node
, NULL_TREE
);
17115 tree opaque_ftype_opaque_opaque_int
17116 = build_function_type_list (opaque_V4SI_type_node
,
17117 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17118 integer_type_node
, NULL_TREE
);
17119 tree opaque_ftype_opaque_opaque_opaque
17120 = build_function_type_list (opaque_V4SI_type_node
,
17121 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17122 opaque_V4SI_type_node
, NULL_TREE
);
17123 tree opaque_ftype_opaque_opaque
17124 = build_function_type_list (opaque_V4SI_type_node
,
17125 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
17127 tree int_ftype_int_opaque_opaque
17128 = build_function_type_list (integer_type_node
,
17129 integer_type_node
, opaque_V4SI_type_node
,
17130 opaque_V4SI_type_node
, NULL_TREE
);
17131 tree int_ftype_int_v4si_v4si
17132 = build_function_type_list (integer_type_node
,
17133 integer_type_node
, V4SI_type_node
,
17134 V4SI_type_node
, NULL_TREE
);
17135 tree int_ftype_int_v2di_v2di
17136 = build_function_type_list (integer_type_node
,
17137 integer_type_node
, V2DI_type_node
,
17138 V2DI_type_node
, NULL_TREE
);
17139 tree void_ftype_v4si
17140 = build_function_type_list (void_type_node
, V4SI_type_node
, NULL_TREE
);
17141 tree v8hi_ftype_void
17142 = build_function_type_list (V8HI_type_node
, NULL_TREE
);
17143 tree void_ftype_void
17144 = build_function_type_list (void_type_node
, NULL_TREE
);
17145 tree void_ftype_int
17146 = build_function_type_list (void_type_node
, integer_type_node
, NULL_TREE
);
17148 tree opaque_ftype_long_pcvoid
17149 = build_function_type_list (opaque_V4SI_type_node
,
17150 long_integer_type_node
, pcvoid_type_node
,
17152 tree v16qi_ftype_long_pcvoid
17153 = build_function_type_list (V16QI_type_node
,
17154 long_integer_type_node
, pcvoid_type_node
,
17156 tree v8hi_ftype_long_pcvoid
17157 = build_function_type_list (V8HI_type_node
,
17158 long_integer_type_node
, pcvoid_type_node
,
17160 tree v4si_ftype_long_pcvoid
17161 = build_function_type_list (V4SI_type_node
,
17162 long_integer_type_node
, pcvoid_type_node
,
17164 tree v4sf_ftype_long_pcvoid
17165 = build_function_type_list (V4SF_type_node
,
17166 long_integer_type_node
, pcvoid_type_node
,
17168 tree v2df_ftype_long_pcvoid
17169 = build_function_type_list (V2DF_type_node
,
17170 long_integer_type_node
, pcvoid_type_node
,
17172 tree v2di_ftype_long_pcvoid
17173 = build_function_type_list (V2DI_type_node
,
17174 long_integer_type_node
, pcvoid_type_node
,
17177 tree void_ftype_opaque_long_pvoid
17178 = build_function_type_list (void_type_node
,
17179 opaque_V4SI_type_node
, long_integer_type_node
,
17180 pvoid_type_node
, NULL_TREE
);
17181 tree void_ftype_v4si_long_pvoid
17182 = build_function_type_list (void_type_node
,
17183 V4SI_type_node
, long_integer_type_node
,
17184 pvoid_type_node
, NULL_TREE
);
17185 tree void_ftype_v16qi_long_pvoid
17186 = build_function_type_list (void_type_node
,
17187 V16QI_type_node
, long_integer_type_node
,
17188 pvoid_type_node
, NULL_TREE
);
17190 tree void_ftype_v16qi_pvoid_long
17191 = build_function_type_list (void_type_node
,
17192 V16QI_type_node
, pvoid_type_node
,
17193 long_integer_type_node
, NULL_TREE
);
17195 tree void_ftype_v8hi_long_pvoid
17196 = build_function_type_list (void_type_node
,
17197 V8HI_type_node
, long_integer_type_node
,
17198 pvoid_type_node
, NULL_TREE
);
17199 tree void_ftype_v4sf_long_pvoid
17200 = build_function_type_list (void_type_node
,
17201 V4SF_type_node
, long_integer_type_node
,
17202 pvoid_type_node
, NULL_TREE
);
17203 tree void_ftype_v2df_long_pvoid
17204 = build_function_type_list (void_type_node
,
17205 V2DF_type_node
, long_integer_type_node
,
17206 pvoid_type_node
, NULL_TREE
);
17207 tree void_ftype_v2di_long_pvoid
17208 = build_function_type_list (void_type_node
,
17209 V2DI_type_node
, long_integer_type_node
,
17210 pvoid_type_node
, NULL_TREE
);
17211 tree int_ftype_int_v8hi_v8hi
17212 = build_function_type_list (integer_type_node
,
17213 integer_type_node
, V8HI_type_node
,
17214 V8HI_type_node
, NULL_TREE
);
17215 tree int_ftype_int_v16qi_v16qi
17216 = build_function_type_list (integer_type_node
,
17217 integer_type_node
, V16QI_type_node
,
17218 V16QI_type_node
, NULL_TREE
);
17219 tree int_ftype_int_v4sf_v4sf
17220 = build_function_type_list (integer_type_node
,
17221 integer_type_node
, V4SF_type_node
,
17222 V4SF_type_node
, NULL_TREE
);
17223 tree int_ftype_int_v2df_v2df
17224 = build_function_type_list (integer_type_node
,
17225 integer_type_node
, V2DF_type_node
,
17226 V2DF_type_node
, NULL_TREE
);
17227 tree v2di_ftype_v2di
17228 = build_function_type_list (V2DI_type_node
, V2DI_type_node
, NULL_TREE
);
17229 tree v4si_ftype_v4si
17230 = build_function_type_list (V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
17231 tree v8hi_ftype_v8hi
17232 = build_function_type_list (V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
17233 tree v16qi_ftype_v16qi
17234 = build_function_type_list (V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
17235 tree v4sf_ftype_v4sf
17236 = build_function_type_list (V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
17237 tree v2df_ftype_v2df
17238 = build_function_type_list (V2DF_type_node
, V2DF_type_node
, NULL_TREE
);
17239 tree void_ftype_pcvoid_int_int
17240 = build_function_type_list (void_type_node
,
17241 pcvoid_type_node
, integer_type_node
,
17242 integer_type_node
, NULL_TREE
);
17244 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si
, ALTIVEC_BUILTIN_MTVSCR
);
17245 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void
, ALTIVEC_BUILTIN_MFVSCR
);
17246 def_builtin ("__builtin_altivec_dssall", void_ftype_void
, ALTIVEC_BUILTIN_DSSALL
);
17247 def_builtin ("__builtin_altivec_dss", void_ftype_int
, ALTIVEC_BUILTIN_DSS
);
17248 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSL
);
17249 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSR
);
17250 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEBX
);
17251 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEHX
);
17252 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEWX
);
17253 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVXL
);
17254 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid
,
17255 ALTIVEC_BUILTIN_LVXL_V2DF
);
17256 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid
,
17257 ALTIVEC_BUILTIN_LVXL_V2DI
);
17258 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid
,
17259 ALTIVEC_BUILTIN_LVXL_V4SF
);
17260 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid
,
17261 ALTIVEC_BUILTIN_LVXL_V4SI
);
17262 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid
,
17263 ALTIVEC_BUILTIN_LVXL_V8HI
);
17264 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid
,
17265 ALTIVEC_BUILTIN_LVXL_V16QI
);
17266 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVX
);
17267 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid
,
17268 ALTIVEC_BUILTIN_LVX_V2DF
);
17269 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid
,
17270 ALTIVEC_BUILTIN_LVX_V2DI
);
17271 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid
,
17272 ALTIVEC_BUILTIN_LVX_V4SF
);
17273 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid
,
17274 ALTIVEC_BUILTIN_LVX_V4SI
);
17275 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid
,
17276 ALTIVEC_BUILTIN_LVX_V8HI
);
17277 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid
,
17278 ALTIVEC_BUILTIN_LVX_V16QI
);
17279 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVX
);
17280 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid
,
17281 ALTIVEC_BUILTIN_STVX_V2DF
);
17282 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid
,
17283 ALTIVEC_BUILTIN_STVX_V2DI
);
17284 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid
,
17285 ALTIVEC_BUILTIN_STVX_V4SF
);
17286 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid
,
17287 ALTIVEC_BUILTIN_STVX_V4SI
);
17288 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid
,
17289 ALTIVEC_BUILTIN_STVX_V8HI
);
17290 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid
,
17291 ALTIVEC_BUILTIN_STVX_V16QI
);
17292 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVEWX
);
17293 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVXL
);
17294 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid
,
17295 ALTIVEC_BUILTIN_STVXL_V2DF
);
17296 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid
,
17297 ALTIVEC_BUILTIN_STVXL_V2DI
);
17298 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid
,
17299 ALTIVEC_BUILTIN_STVXL_V4SF
);
17300 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid
,
17301 ALTIVEC_BUILTIN_STVXL_V4SI
);
17302 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid
,
17303 ALTIVEC_BUILTIN_STVXL_V8HI
);
17304 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid
,
17305 ALTIVEC_BUILTIN_STVXL_V16QI
);
17306 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVEBX
);
17307 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid
, ALTIVEC_BUILTIN_STVEHX
);
17308 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LD
);
17309 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDE
);
17310 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDL
);
17311 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSL
);
17312 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSR
);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V16QI);

  def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LD_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_ST_ELEMREV_V4SI);

  def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V8HI);
  def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V4SI);
  def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V2DI);
  def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V4SF);
  def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V2DF);
  def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_XL_BE_V16QI);
  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
                   VSX_BUILTIN_LD_ELEMREV_V8HI);
      def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
                   VSX_BUILTIN_LD_ELEMREV_V16QI);
      def_builtin ("__builtin_vsx_st_elemrev_v8hi",
                   void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
      def_builtin ("__builtin_vsx_st_elemrev_v16qi",
                   void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
    }
  else
    {
      rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
      rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
	= rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
    }
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_XL);
  def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_XL_BE);
  def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_XST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_ADDE);
  def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_ADDEC);
  def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_CMPNE);
  def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_MUL);
  def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_SUBE);
  def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
               ALTIVEC_BUILTIN_VEC_SUBEC);
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);

  if (TARGET_P9_VECTOR)
    def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
                 P9V_BUILTIN_STXVL);
17467 /* Add the DST variants. */
17469 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
17471 HOST_WIDE_INT mask
= d
->mask
;
17473 /* It is expected that these dst built-in functions may have
17474 d->icode equal to CODE_FOR_nothing. */
17475 if ((mask
& builtin_mask
) != mask
)
17477 if (TARGET_DEBUG_BUILTIN
)
17478 fprintf (stderr
, "altivec_init_builtins, skip dst %s\n",
17482 def_builtin (d
->name
, void_ftype_pcvoid_int_int
, d
->code
);
17485 /* Initialize the predicates. */
17486 d
= bdesc_altivec_preds
;
17487 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
17489 machine_mode mode1
;
17491 HOST_WIDE_INT mask
= d
->mask
;
17493 if ((mask
& builtin_mask
) != mask
)
17495 if (TARGET_DEBUG_BUILTIN
)
17496 fprintf (stderr
, "altivec_init_builtins, skip predicate %s\n",
17501 if (rs6000_overloaded_builtin_p (d
->code
))
17505 /* Cannot define builtin if the instruction is disabled. */
17506 gcc_assert (d
->icode
!= CODE_FOR_nothing
);
17507 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
17513 type
= int_ftype_int_opaque_opaque
;
17516 type
= int_ftype_int_v2di_v2di
;
17519 type
= int_ftype_int_v4si_v4si
;
17522 type
= int_ftype_int_v8hi_v8hi
;
17525 type
= int_ftype_int_v16qi_v16qi
;
17528 type
= int_ftype_int_v4sf_v4sf
;
17531 type
= int_ftype_int_v2df_v2df
;
17534 gcc_unreachable ();
17537 def_builtin (d
->name
, type
, d
->code
);
17540 /* Initialize the abs* operators. */
17542 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
17544 machine_mode mode0
;
17546 HOST_WIDE_INT mask
= d
->mask
;
17548 if ((mask
& builtin_mask
) != mask
)
17550 if (TARGET_DEBUG_BUILTIN
)
17551 fprintf (stderr
, "altivec_init_builtins, skip abs %s\n",
17556 /* Cannot define builtin if the instruction is disabled. */
17557 gcc_assert (d
->icode
!= CODE_FOR_nothing
);
17558 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
17563 type
= v2di_ftype_v2di
;
17566 type
= v4si_ftype_v4si
;
17569 type
= v8hi_ftype_v8hi
;
17572 type
= v16qi_ftype_v16qi
;
17575 type
= v4sf_ftype_v4sf
;
17578 type
= v2df_ftype_v2df
;
17581 gcc_unreachable ();
17584 def_builtin (d
->name
, type
, d
->code
);
17587 /* Initialize target builtin that implements
17588 targetm.vectorize.builtin_mask_for_load. */
17590 decl
= add_builtin_function ("__builtin_altivec_mask_for_load",
17591 v16qi_ftype_long_pcvoid
,
17592 ALTIVEC_BUILTIN_MASK_FOR_LOAD
,
17593 BUILT_IN_MD
, NULL
, NULL_TREE
);
17594 TREE_READONLY (decl
) = 1;
17595 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17596 altivec_builtin_mask_for_load
= decl
;
17598 /* Access to the vec_init patterns. */
17599 ftype
= build_function_type_list (V4SI_type_node
, integer_type_node
,
17600 integer_type_node
, integer_type_node
,
17601 integer_type_node
, NULL_TREE
);
17602 def_builtin ("__builtin_vec_init_v4si", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V4SI
);
17604 ftype
= build_function_type_list (V8HI_type_node
, short_integer_type_node
,
17605 short_integer_type_node
,
17606 short_integer_type_node
,
17607 short_integer_type_node
,
17608 short_integer_type_node
,
17609 short_integer_type_node
,
17610 short_integer_type_node
,
17611 short_integer_type_node
, NULL_TREE
);
17612 def_builtin ("__builtin_vec_init_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V8HI
);
17614 ftype
= build_function_type_list (V16QI_type_node
, char_type_node
,
17615 char_type_node
, char_type_node
,
17616 char_type_node
, char_type_node
,
17617 char_type_node
, char_type_node
,
17618 char_type_node
, char_type_node
,
17619 char_type_node
, char_type_node
,
17620 char_type_node
, char_type_node
,
17621 char_type_node
, char_type_node
,
17622 char_type_node
, NULL_TREE
);
17623 def_builtin ("__builtin_vec_init_v16qi", ftype
,
17624 ALTIVEC_BUILTIN_VEC_INIT_V16QI
);
17626 ftype
= build_function_type_list (V4SF_type_node
, float_type_node
,
17627 float_type_node
, float_type_node
,
17628 float_type_node
, NULL_TREE
);
17629 def_builtin ("__builtin_vec_init_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V4SF
);
17631 /* VSX builtins. */
17632 ftype
= build_function_type_list (V2DF_type_node
, double_type_node
,
17633 double_type_node
, NULL_TREE
);
17634 def_builtin ("__builtin_vec_init_v2df", ftype
, VSX_BUILTIN_VEC_INIT_V2DF
);
17636 ftype
= build_function_type_list (V2DI_type_node
, intDI_type_node
,
17637 intDI_type_node
, NULL_TREE
);
17638 def_builtin ("__builtin_vec_init_v2di", ftype
, VSX_BUILTIN_VEC_INIT_V2DI
);
17640 /* Access to the vec_set patterns. */
17641 ftype
= build_function_type_list (V4SI_type_node
, V4SI_type_node
,
17643 integer_type_node
, NULL_TREE
);
17644 def_builtin ("__builtin_vec_set_v4si", ftype
, ALTIVEC_BUILTIN_VEC_SET_V4SI
);
17646 ftype
= build_function_type_list (V8HI_type_node
, V8HI_type_node
,
17648 integer_type_node
, NULL_TREE
);
17649 def_builtin ("__builtin_vec_set_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_SET_V8HI
);
17651 ftype
= build_function_type_list (V16QI_type_node
, V16QI_type_node
,
17653 integer_type_node
, NULL_TREE
);
17654 def_builtin ("__builtin_vec_set_v16qi", ftype
, ALTIVEC_BUILTIN_VEC_SET_V16QI
);
17656 ftype
= build_function_type_list (V4SF_type_node
, V4SF_type_node
,
17658 integer_type_node
, NULL_TREE
);
17659 def_builtin ("__builtin_vec_set_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_SET_V4SF
);
17661 ftype
= build_function_type_list (V2DF_type_node
, V2DF_type_node
,
17663 integer_type_node
, NULL_TREE
);
17664 def_builtin ("__builtin_vec_set_v2df", ftype
, VSX_BUILTIN_VEC_SET_V2DF
);
17666 ftype
= build_function_type_list (V2DI_type_node
, V2DI_type_node
,
17668 integer_type_node
, NULL_TREE
);
17669 def_builtin ("__builtin_vec_set_v2di", ftype
, VSX_BUILTIN_VEC_SET_V2DI
);
17671 /* Access to the vec_extract patterns. */
17672 ftype
= build_function_type_list (intSI_type_node
, V4SI_type_node
,
17673 integer_type_node
, NULL_TREE
);
17674 def_builtin ("__builtin_vec_ext_v4si", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V4SI
);
17676 ftype
= build_function_type_list (intHI_type_node
, V8HI_type_node
,
17677 integer_type_node
, NULL_TREE
);
17678 def_builtin ("__builtin_vec_ext_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V8HI
);
17680 ftype
= build_function_type_list (intQI_type_node
, V16QI_type_node
,
17681 integer_type_node
, NULL_TREE
);
17682 def_builtin ("__builtin_vec_ext_v16qi", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V16QI
);
17684 ftype
= build_function_type_list (float_type_node
, V4SF_type_node
,
17685 integer_type_node
, NULL_TREE
);
17686 def_builtin ("__builtin_vec_ext_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V4SF
);
17688 ftype
= build_function_type_list (double_type_node
, V2DF_type_node
,
17689 integer_type_node
, NULL_TREE
);
17690 def_builtin ("__builtin_vec_ext_v2df", ftype
, VSX_BUILTIN_VEC_EXT_V2DF
);
17692 ftype
= build_function_type_list (intDI_type_node
, V2DI_type_node
,
17693 integer_type_node
, NULL_TREE
);
17694 def_builtin ("__builtin_vec_ext_v2di", ftype
, VSX_BUILTIN_VEC_EXT_V2DI
);
17697 if (V1TI_type_node
)
17699 tree v1ti_ftype_long_pcvoid
17700 = build_function_type_list (V1TI_type_node
,
17701 long_integer_type_node
, pcvoid_type_node
,
17703 tree void_ftype_v1ti_long_pvoid
17704 = build_function_type_list (void_type_node
,
17705 V1TI_type_node
, long_integer_type_node
,
17706 pvoid_type_node
, NULL_TREE
);
17707 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid
,
17708 VSX_BUILTIN_LXVD2X_V1TI
);
17709 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid
,
17710 VSX_BUILTIN_STXVD2X_V1TI
);
17711 ftype
= build_function_type_list (V1TI_type_node
, intTI_type_node
,
17712 NULL_TREE
, NULL_TREE
);
17713 def_builtin ("__builtin_vec_init_v1ti", ftype
, VSX_BUILTIN_VEC_INIT_V1TI
);
17714 ftype
= build_function_type_list (V1TI_type_node
, V1TI_type_node
,
17716 integer_type_node
, NULL_TREE
);
17717 def_builtin ("__builtin_vec_set_v1ti", ftype
, VSX_BUILTIN_VEC_SET_V1TI
);
17718 ftype
= build_function_type_list (intTI_type_node
, V1TI_type_node
,
17719 integer_type_node
, NULL_TREE
);
17720 def_builtin ("__builtin_vec_ext_v1ti", ftype
, VSX_BUILTIN_VEC_EXT_V1TI
);
17726 htm_init_builtins (void)
17728 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
17729 const struct builtin_description
*d
;
17733 for (i
= 0; i
< ARRAY_SIZE (bdesc_htm
); i
++, d
++)
17735 tree op
[MAX_HTM_OPERANDS
], type
;
17736 HOST_WIDE_INT mask
= d
->mask
;
17737 unsigned attr
= rs6000_builtin_info
[d
->code
].attr
;
17738 bool void_func
= (attr
& RS6000_BTC_VOID
);
17739 int attr_args
= (attr
& RS6000_BTC_TYPE_MASK
);
17741 tree gpr_type_node
;
17745 /* It is expected that these htm built-in functions may have
17746 d->icode equal to CODE_FOR_nothing. */
17748 if (TARGET_32BIT
&& TARGET_POWERPC64
)
17749 gpr_type_node
= long_long_unsigned_type_node
;
17751 gpr_type_node
= long_unsigned_type_node
;
17753 if (attr
& RS6000_BTC_SPR
)
17755 rettype
= gpr_type_node
;
17756 argtype
= gpr_type_node
;
17758 else if (d
->code
== HTM_BUILTIN_TABORTDC
17759 || d
->code
== HTM_BUILTIN_TABORTDCI
)
17761 rettype
= unsigned_type_node
;
17762 argtype
= gpr_type_node
;
17766 rettype
= unsigned_type_node
;
17767 argtype
= unsigned_type_node
;
17770 if ((mask
& builtin_mask
) != mask
)
17772 if (TARGET_DEBUG_BUILTIN
)
17773 fprintf (stderr
, "htm_builtin, skip binary %s\n", d
->name
);
17779 if (TARGET_DEBUG_BUILTIN
)
17780 fprintf (stderr
, "htm_builtin, bdesc_htm[%ld] no name\n",
17781 (long unsigned) i
);
17785 op
[nopnds
++] = (void_func
) ? void_type_node
: rettype
;
17787 if (attr_args
== RS6000_BTC_UNARY
)
17788 op
[nopnds
++] = argtype
;
17789 else if (attr_args
== RS6000_BTC_BINARY
)
17791 op
[nopnds
++] = argtype
;
17792 op
[nopnds
++] = argtype
;
17794 else if (attr_args
== RS6000_BTC_TERNARY
)
17796 op
[nopnds
++] = argtype
;
17797 op
[nopnds
++] = argtype
;
17798 op
[nopnds
++] = argtype
;
17804 type
= build_function_type_list (op
[0], NULL_TREE
);
17807 type
= build_function_type_list (op
[0], op
[1], NULL_TREE
);
17810 type
= build_function_type_list (op
[0], op
[1], op
[2], NULL_TREE
);
17813 type
= build_function_type_list (op
[0], op
[1], op
[2], op
[3],
17817 gcc_unreachable ();
17820 def_builtin (d
->name
, type
, d
->code
);
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */
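/* Illustrative example only (names are real builtins, but the exact result
   is an assumption drawn from the mapping sketched here): a call such as
       builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
			      ALTIVEC_BUILTIN_VADDUWM,
			      "__builtin_altivec_vadduwm")
   is expected to hand back a FUNCTION_TYPE equivalent to
       vector signed int (vector signed int, vector signed int)
   unless the builtin appears in the unsigned case lists below.  */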
static tree
builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
		       machine_mode mode_arg1, machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
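  /* For example, ALTIVEC_BUILTIN_VMULEUB appears in the "unsigned 2 argument
     functions" list below: vmuleub multiplies unsigned char elements and
     widens them, so the decl handed to the vectorizer should read
	 vector unsigned short (vector unsigned char, vector unsigned char)
     rather than the signed types the instruction pattern's modes alone would
     suggest.  (Illustrative reading of the case lists that follow.)  */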
17890 /* unsigned 1 argument functions. */
17891 case CRYPTO_BUILTIN_VSBOX
:
17892 case P8V_BUILTIN_VGBBD
:
17893 case MISC_BUILTIN_CDTBCD
:
17894 case MISC_BUILTIN_CBCDTD
:
17899 /* unsigned 2 argument functions. */
17900 case ALTIVEC_BUILTIN_VMULEUB
:
17901 case ALTIVEC_BUILTIN_VMULEUH
:
17902 case ALTIVEC_BUILTIN_VMULEUW
:
17903 case ALTIVEC_BUILTIN_VMULOUB
:
17904 case ALTIVEC_BUILTIN_VMULOUH
:
17905 case ALTIVEC_BUILTIN_VMULOUW
:
17906 case CRYPTO_BUILTIN_VCIPHER
:
17907 case CRYPTO_BUILTIN_VCIPHERLAST
:
17908 case CRYPTO_BUILTIN_VNCIPHER
:
17909 case CRYPTO_BUILTIN_VNCIPHERLAST
:
17910 case CRYPTO_BUILTIN_VPMSUMB
:
17911 case CRYPTO_BUILTIN_VPMSUMH
:
17912 case CRYPTO_BUILTIN_VPMSUMW
:
17913 case CRYPTO_BUILTIN_VPMSUMD
:
17914 case CRYPTO_BUILTIN_VPMSUM
:
17915 case MISC_BUILTIN_ADDG6S
:
17916 case MISC_BUILTIN_DIVWEU
:
17917 case MISC_BUILTIN_DIVWEUO
:
17918 case MISC_BUILTIN_DIVDEU
:
17919 case MISC_BUILTIN_DIVDEUO
:
17920 case VSX_BUILTIN_UDIV_V2DI
:
17921 case ALTIVEC_BUILTIN_VMAXUB
:
17922 case ALTIVEC_BUILTIN_VMINUB
:
17923 case ALTIVEC_BUILTIN_VMAXUH
:
17924 case ALTIVEC_BUILTIN_VMINUH
:
17925 case ALTIVEC_BUILTIN_VMAXUW
:
17926 case ALTIVEC_BUILTIN_VMINUW
:
17927 case P8V_BUILTIN_VMAXUD
:
17928 case P8V_BUILTIN_VMINUD
:
17934 /* unsigned 3 argument functions. */
17935 case ALTIVEC_BUILTIN_VPERM_16QI_UNS
:
17936 case ALTIVEC_BUILTIN_VPERM_8HI_UNS
:
17937 case ALTIVEC_BUILTIN_VPERM_4SI_UNS
:
17938 case ALTIVEC_BUILTIN_VPERM_2DI_UNS
:
17939 case ALTIVEC_BUILTIN_VSEL_16QI_UNS
:
17940 case ALTIVEC_BUILTIN_VSEL_8HI_UNS
:
17941 case ALTIVEC_BUILTIN_VSEL_4SI_UNS
:
17942 case ALTIVEC_BUILTIN_VSEL_2DI_UNS
:
17943 case VSX_BUILTIN_VPERM_16QI_UNS
:
17944 case VSX_BUILTIN_VPERM_8HI_UNS
:
17945 case VSX_BUILTIN_VPERM_4SI_UNS
:
17946 case VSX_BUILTIN_VPERM_2DI_UNS
:
17947 case VSX_BUILTIN_XXSEL_16QI_UNS
:
17948 case VSX_BUILTIN_XXSEL_8HI_UNS
:
17949 case VSX_BUILTIN_XXSEL_4SI_UNS
:
17950 case VSX_BUILTIN_XXSEL_2DI_UNS
:
17951 case CRYPTO_BUILTIN_VPERMXOR
:
17952 case CRYPTO_BUILTIN_VPERMXOR_V2DI
:
17953 case CRYPTO_BUILTIN_VPERMXOR_V4SI
:
17954 case CRYPTO_BUILTIN_VPERMXOR_V8HI
:
17955 case CRYPTO_BUILTIN_VPERMXOR_V16QI
:
17956 case CRYPTO_BUILTIN_VSHASIGMAW
:
17957 case CRYPTO_BUILTIN_VSHASIGMAD
:
17958 case CRYPTO_BUILTIN_VSHASIGMA
:
17965 /* signed permute functions with unsigned char mask. */
17966 case ALTIVEC_BUILTIN_VPERM_16QI
:
17967 case ALTIVEC_BUILTIN_VPERM_8HI
:
17968 case ALTIVEC_BUILTIN_VPERM_4SI
:
17969 case ALTIVEC_BUILTIN_VPERM_4SF
:
17970 case ALTIVEC_BUILTIN_VPERM_2DI
:
17971 case ALTIVEC_BUILTIN_VPERM_2DF
:
17972 case VSX_BUILTIN_VPERM_16QI
:
17973 case VSX_BUILTIN_VPERM_8HI
:
17974 case VSX_BUILTIN_VPERM_4SI
:
17975 case VSX_BUILTIN_VPERM_4SF
:
17976 case VSX_BUILTIN_VPERM_2DI
:
17977 case VSX_BUILTIN_VPERM_2DF
:
17981 /* unsigned args, signed return. */
17982 case VSX_BUILTIN_XVCVUXDSP
:
17983 case VSX_BUILTIN_XVCVUXDDP_UNS
:
17984 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF
:
17988 /* signed args, unsigned return. */
17989 case VSX_BUILTIN_XVCVDPUXDS_UNS
:
17990 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI
:
17991 case MISC_BUILTIN_UNPACK_TD
:
17992 case MISC_BUILTIN_UNPACK_V1TI
:
17996 /* unsigned arguments for 128-bit pack instructions. */
17997 case MISC_BUILTIN_PACK_TD
:
17998 case MISC_BUILTIN_PACK_V1TI
:
18003 /* unsigned second arguments (vector shift right). */
18004 case ALTIVEC_BUILTIN_VSRB
:
18005 case ALTIVEC_BUILTIN_VSRH
:
18006 case ALTIVEC_BUILTIN_VSRW
:
18007 case P8V_BUILTIN_VSRD
:
18015 /* Figure out how many args are present. */
18016 while (num_args
> 0 && h
.mode
[num_args
] == VOIDmode
)
18019 ret_type
= builtin_mode_to_type
[h
.mode
[0]][h
.uns_p
[0]];
18020 if (!ret_type
&& h
.uns_p
[0])
18021 ret_type
= builtin_mode_to_type
[h
.mode
[0]][0];
18024 fatal_error (input_location
,
18025 "internal error: builtin function %qs had an unexpected "
18026 "return type %qs", name
, GET_MODE_NAME (h
.mode
[0]));
18028 for (i
= 0; i
< (int) ARRAY_SIZE (arg_type
); i
++)
18029 arg_type
[i
] = NULL_TREE
;
18031 for (i
= 0; i
< num_args
; i
++)
18033 int m
= (int) h
.mode
[i
+1];
18034 int uns_p
= h
.uns_p
[i
+1];
18036 arg_type
[i
] = builtin_mode_to_type
[m
][uns_p
];
18037 if (!arg_type
[i
] && uns_p
)
18038 arg_type
[i
] = builtin_mode_to_type
[m
][0];
18041 fatal_error (input_location
,
18042 "internal error: builtin function %qs, argument %d "
18043 "had unexpected argument type %qs", name
, i
,
18044 GET_MODE_NAME (m
));
18047 builtin_hash_struct
**found
= builtin_hash_table
->find_slot (&h
, INSERT
);
18048 if (*found
== NULL
)
18050 h2
= ggc_alloc
<builtin_hash_struct
> ();
18054 h2
->type
= build_function_type_list (ret_type
, arg_type
[0], arg_type
[1],
18055 arg_type
[2], NULL_TREE
);
18058 return (*found
)->type
;
18062 rs6000_common_init_builtins (void)
18064 const struct builtin_description
*d
;
18067 tree opaque_ftype_opaque
= NULL_TREE
;
18068 tree opaque_ftype_opaque_opaque
= NULL_TREE
;
18069 tree opaque_ftype_opaque_opaque_opaque
= NULL_TREE
;
18070 tree v2si_ftype
= NULL_TREE
;
18071 tree v2si_ftype_qi
= NULL_TREE
;
18072 tree v2si_ftype_v2si_qi
= NULL_TREE
;
18073 tree v2si_ftype_int_qi
= NULL_TREE
;
18074 HOST_WIDE_INT builtin_mask
= rs6000_builtin_mask
;
18076 if (!TARGET_PAIRED_FLOAT
)
18078 builtin_mode_to_type
[V2SImode
][0] = opaque_V2SI_type_node
;
18079 builtin_mode_to_type
[V2SFmode
][0] = opaque_V2SF_type_node
;
18082 /* Paired builtins are only available if you build a compiler with the
18083 appropriate options, so only create those builtins with the appropriate
18084 compiler option. Create Altivec and VSX builtins on machines with at
18085 least the general purpose extensions (970 and newer) to allow the use of
18086 the target attribute.. */
18088 if (TARGET_EXTRA_BUILTINS
)
18089 builtin_mask
|= RS6000_BTM_COMMON
;
18091 /* Add the ternary operators. */
18093 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
18096 HOST_WIDE_INT mask
= d
->mask
;
18098 if ((mask
& builtin_mask
) != mask
)
18100 if (TARGET_DEBUG_BUILTIN
)
18101 fprintf (stderr
, "rs6000_builtin, skip ternary %s\n", d
->name
);
18105 if (rs6000_overloaded_builtin_p (d
->code
))
18107 if (! (type
= opaque_ftype_opaque_opaque_opaque
))
18108 type
= opaque_ftype_opaque_opaque_opaque
18109 = build_function_type_list (opaque_V4SI_type_node
,
18110 opaque_V4SI_type_node
,
18111 opaque_V4SI_type_node
,
18112 opaque_V4SI_type_node
,
18117 enum insn_code icode
= d
->icode
;
18120 if (TARGET_DEBUG_BUILTIN
)
18121 fprintf (stderr
, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18127 if (icode
== CODE_FOR_nothing
)
18129 if (TARGET_DEBUG_BUILTIN
)
18130 fprintf (stderr
, "rs6000_builtin, skip ternary %s (no code)\n",
18136 type
= builtin_function_type (insn_data
[icode
].operand
[0].mode
,
18137 insn_data
[icode
].operand
[1].mode
,
18138 insn_data
[icode
].operand
[2].mode
,
18139 insn_data
[icode
].operand
[3].mode
,
18143 def_builtin (d
->name
, type
, d
->code
);
18146 /* Add the binary operators. */
18148 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
18150 machine_mode mode0
, mode1
, mode2
;
18152 HOST_WIDE_INT mask
= d
->mask
;
18154 if ((mask
& builtin_mask
) != mask
)
18156 if (TARGET_DEBUG_BUILTIN
)
18157 fprintf (stderr
, "rs6000_builtin, skip binary %s\n", d
->name
);
18161 if (rs6000_overloaded_builtin_p (d
->code
))
18163 if (! (type
= opaque_ftype_opaque_opaque
))
18164 type
= opaque_ftype_opaque_opaque
18165 = build_function_type_list (opaque_V4SI_type_node
,
18166 opaque_V4SI_type_node
,
18167 opaque_V4SI_type_node
,
18172 enum insn_code icode
= d
->icode
;
18175 if (TARGET_DEBUG_BUILTIN
)
18176 fprintf (stderr
, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18182 if (icode
== CODE_FOR_nothing
)
18184 if (TARGET_DEBUG_BUILTIN
)
18185 fprintf (stderr
, "rs6000_builtin, skip binary %s (no code)\n",
18191 mode0
= insn_data
[icode
].operand
[0].mode
;
18192 mode1
= insn_data
[icode
].operand
[1].mode
;
18193 mode2
= insn_data
[icode
].operand
[2].mode
;
18195 if (mode0
== V2SImode
&& mode1
== V2SImode
&& mode2
== QImode
)
18197 if (! (type
= v2si_ftype_v2si_qi
))
18198 type
= v2si_ftype_v2si_qi
18199 = build_function_type_list (opaque_V2SI_type_node
,
18200 opaque_V2SI_type_node
,
18205 else if (mode0
== V2SImode
&& GET_MODE_CLASS (mode1
) == MODE_INT
18206 && mode2
== QImode
)
18208 if (! (type
= v2si_ftype_int_qi
))
18209 type
= v2si_ftype_int_qi
18210 = build_function_type_list (opaque_V2SI_type_node
,
18217 type
= builtin_function_type (mode0
, mode1
, mode2
, VOIDmode
,
18221 def_builtin (d
->name
, type
, d
->code
);
18224 /* Add the simple unary operators. */
18226 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
18228 machine_mode mode0
, mode1
;
18230 HOST_WIDE_INT mask
= d
->mask
;
18232 if ((mask
& builtin_mask
) != mask
)
18234 if (TARGET_DEBUG_BUILTIN
)
18235 fprintf (stderr
, "rs6000_builtin, skip unary %s\n", d
->name
);
18239 if (rs6000_overloaded_builtin_p (d
->code
))
18241 if (! (type
= opaque_ftype_opaque
))
18242 type
= opaque_ftype_opaque
18243 = build_function_type_list (opaque_V4SI_type_node
,
18244 opaque_V4SI_type_node
,
18249 enum insn_code icode
= d
->icode
;
18252 if (TARGET_DEBUG_BUILTIN
)
18253 fprintf (stderr
, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18259 if (icode
== CODE_FOR_nothing
)
18261 if (TARGET_DEBUG_BUILTIN
)
18262 fprintf (stderr
, "rs6000_builtin, skip unary %s (no code)\n",
18268 mode0
= insn_data
[icode
].operand
[0].mode
;
18269 mode1
= insn_data
[icode
].operand
[1].mode
;
18271 if (mode0
== V2SImode
&& mode1
== QImode
)
18273 if (! (type
= v2si_ftype_qi
))
18274 type
= v2si_ftype_qi
18275 = build_function_type_list (opaque_V2SI_type_node
,
18281 type
= builtin_function_type (mode0
, mode1
, VOIDmode
, VOIDmode
,
18285 def_builtin (d
->name
, type
, d
->code
);
18288 /* Add the simple no-argument operators. */
18290 for (i
= 0; i
< ARRAY_SIZE (bdesc_0arg
); i
++, d
++)
18292 machine_mode mode0
;
18294 HOST_WIDE_INT mask
= d
->mask
;
18296 if ((mask
& builtin_mask
) != mask
)
18298 if (TARGET_DEBUG_BUILTIN
)
18299 fprintf (stderr
, "rs6000_builtin, skip no-argument %s\n", d
->name
);
18302 if (rs6000_overloaded_builtin_p (d
->code
))
18304 if (!opaque_ftype_opaque
)
18305 opaque_ftype_opaque
18306 = build_function_type_list (opaque_V4SI_type_node
, NULL_TREE
);
18307 type
= opaque_ftype_opaque
;
18311 enum insn_code icode
= d
->icode
;
18314 if (TARGET_DEBUG_BUILTIN
)
18315 fprintf (stderr
, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18316 (long unsigned) i
);
18319 if (icode
== CODE_FOR_nothing
)
18321 if (TARGET_DEBUG_BUILTIN
)
18323 "rs6000_builtin, skip no-argument %s (no code)\n",
18327 mode0
= insn_data
[icode
].operand
[0].mode
;
18328 if (mode0
== V2SImode
)
18330 /* code for paired single */
18331 if (! (type
= v2si_ftype
))
18334 = build_function_type_list (opaque_V2SI_type_node
,
18340 type
= builtin_function_type (mode0
, VOIDmode
, VOIDmode
, VOIDmode
,
18343 def_builtin (d
->name
, type
, d
->code
);
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */

static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
	{
	  set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
	  set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
	  set_optab_libfunc (ne_optab, mode, "__gcc_qne");
	  set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
	  set_optab_libfunc (ge_optab, mode, "__gcc_qge");
	  set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
	  set_optab_libfunc (le_optab, mode, "__gcc_qle");
	  set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

	  set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
	  set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
	  set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
	  set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
	  set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
	  set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
	  set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
	  set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
	}
    }
}
/* Set up IEEE 128-bit floating point routines.  Use different names if the
   arguments can be passed in a vector register.  The historical PowerPC
   implementation of IEEE 128-bit floating point used _q_<op> for the names, so
   continue to use that if we aren't using vector registers to pass IEEE
   128-bit floating point.  */
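/* Illustrative note (an assumption about how these tables are consumed, not
   something stated here): with the vector-register names installed below, a
   KFmode addition that is not expanded inline ends up calling "__addkf3"
   from libgcc, whereas the older SVR4 long-double path further down uses
   "_q_add" instead.  */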
18415 init_float128_ieee (machine_mode mode
)
18417 if (FLOAT128_VECTOR_P (mode
))
18419 set_optab_libfunc (add_optab
, mode
, "__addkf3");
18420 set_optab_libfunc (sub_optab
, mode
, "__subkf3");
18421 set_optab_libfunc (neg_optab
, mode
, "__negkf2");
18422 set_optab_libfunc (smul_optab
, mode
, "__mulkf3");
18423 set_optab_libfunc (sdiv_optab
, mode
, "__divkf3");
18424 set_optab_libfunc (sqrt_optab
, mode
, "__sqrtkf2");
18425 set_optab_libfunc (abs_optab
, mode
, "__abstkf2");
18427 set_optab_libfunc (eq_optab
, mode
, "__eqkf2");
18428 set_optab_libfunc (ne_optab
, mode
, "__nekf2");
18429 set_optab_libfunc (gt_optab
, mode
, "__gtkf2");
18430 set_optab_libfunc (ge_optab
, mode
, "__gekf2");
18431 set_optab_libfunc (lt_optab
, mode
, "__ltkf2");
18432 set_optab_libfunc (le_optab
, mode
, "__lekf2");
18433 set_optab_libfunc (unord_optab
, mode
, "__unordkf2");
18435 set_conv_libfunc (sext_optab
, mode
, SFmode
, "__extendsfkf2");
18436 set_conv_libfunc (sext_optab
, mode
, DFmode
, "__extenddfkf2");
18437 set_conv_libfunc (trunc_optab
, SFmode
, mode
, "__trunckfsf2");
18438 set_conv_libfunc (trunc_optab
, DFmode
, mode
, "__trunckfdf2");
18440 set_conv_libfunc (sext_optab
, mode
, IFmode
, "__extendtfkf2");
18441 if (mode
!= TFmode
&& FLOAT128_IBM_P (TFmode
))
18442 set_conv_libfunc (sext_optab
, mode
, TFmode
, "__extendtfkf2");
18444 set_conv_libfunc (trunc_optab
, IFmode
, mode
, "__trunckftf2");
18445 if (mode
!= TFmode
&& FLOAT128_IBM_P (TFmode
))
18446 set_conv_libfunc (trunc_optab
, TFmode
, mode
, "__trunckftf2");
18448 set_conv_libfunc (sext_optab
, mode
, SDmode
, "__dpd_extendsdkf2");
18449 set_conv_libfunc (sext_optab
, mode
, DDmode
, "__dpd_extendddkf2");
18450 set_conv_libfunc (trunc_optab
, mode
, TDmode
, "__dpd_trunckftd2");
18451 set_conv_libfunc (trunc_optab
, SDmode
, mode
, "__dpd_trunckfsd2");
18452 set_conv_libfunc (trunc_optab
, DDmode
, mode
, "__dpd_trunckfdd2");
18453 set_conv_libfunc (sext_optab
, TDmode
, mode
, "__dpd_extendtdkf2");
18455 set_conv_libfunc (sfix_optab
, SImode
, mode
, "__fixkfsi");
18456 set_conv_libfunc (ufix_optab
, SImode
, mode
, "__fixunskfsi");
18457 set_conv_libfunc (sfix_optab
, DImode
, mode
, "__fixkfdi");
18458 set_conv_libfunc (ufix_optab
, DImode
, mode
, "__fixunskfdi");
18460 set_conv_libfunc (sfloat_optab
, mode
, SImode
, "__floatsikf");
18461 set_conv_libfunc (ufloat_optab
, mode
, SImode
, "__floatunsikf");
18462 set_conv_libfunc (sfloat_optab
, mode
, DImode
, "__floatdikf");
18463 set_conv_libfunc (ufloat_optab
, mode
, DImode
, "__floatundikf");
18465 if (TARGET_POWERPC64
)
18467 set_conv_libfunc (sfix_optab
, TImode
, mode
, "__fixkfti");
18468 set_conv_libfunc (ufix_optab
, TImode
, mode
, "__fixunskfti");
18469 set_conv_libfunc (sfloat_optab
, mode
, TImode
, "__floattikf");
18470 set_conv_libfunc (ufloat_optab
, mode
, TImode
, "__floatuntikf");
18476 set_optab_libfunc (add_optab
, mode
, "_q_add");
18477 set_optab_libfunc (sub_optab
, mode
, "_q_sub");
18478 set_optab_libfunc (neg_optab
, mode
, "_q_neg");
18479 set_optab_libfunc (smul_optab
, mode
, "_q_mul");
18480 set_optab_libfunc (sdiv_optab
, mode
, "_q_div");
18481 if (TARGET_PPC_GPOPT
)
18482 set_optab_libfunc (sqrt_optab
, mode
, "_q_sqrt");
18484 set_optab_libfunc (eq_optab
, mode
, "_q_feq");
18485 set_optab_libfunc (ne_optab
, mode
, "_q_fne");
18486 set_optab_libfunc (gt_optab
, mode
, "_q_fgt");
18487 set_optab_libfunc (ge_optab
, mode
, "_q_fge");
18488 set_optab_libfunc (lt_optab
, mode
, "_q_flt");
18489 set_optab_libfunc (le_optab
, mode
, "_q_fle");
18491 set_conv_libfunc (sext_optab
, mode
, SFmode
, "_q_stoq");
18492 set_conv_libfunc (sext_optab
, mode
, DFmode
, "_q_dtoq");
18493 set_conv_libfunc (trunc_optab
, SFmode
, mode
, "_q_qtos");
18494 set_conv_libfunc (trunc_optab
, DFmode
, mode
, "_q_qtod");
18495 set_conv_libfunc (sfix_optab
, SImode
, mode
, "_q_qtoi");
18496 set_conv_libfunc (ufix_optab
, SImode
, mode
, "_q_qtou");
18497 set_conv_libfunc (sfloat_optab
, mode
, SImode
, "_q_itoq");
18498 set_conv_libfunc (ufloat_optab
, mode
, SImode
, "_q_utoq");
18503 rs6000_init_libfuncs (void)
18505 /* __float128 support. */
18506 if (TARGET_FLOAT128_TYPE
)
18508 init_float128_ibm (IFmode
);
18509 init_float128_ieee (KFmode
);
18512 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18513 if (TARGET_LONG_DOUBLE_128
)
18515 if (!TARGET_IEEEQUAD
)
18516 init_float128_ibm (TFmode
);
18518 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18520 init_float128_ieee (TFmode
);
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */
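/* Roughly, the two record-form shapes emitted below look like this
   (a sketch, not verbatim RTL):
     dot == 1, CCREG is CR0:
       (parallel [(set CCREG (compare:CC SRC (const_int 0)))
		  (clobber DST)])
     dot == 2, CCREG is CR0:
       (parallel [(set CCREG (compare:CC SRC (const_int 0)))
		  (set DST SRC)])
   Any other CCREG falls back to a plain move plus a separate compare.  */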
void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
   rldicl, rldicr, or rldic instruction in mode MODE.  If so, if E is
   not zero, store there the bit offset (counted from the right) where
   the single stretch of 1 bits begins; and similarly for B, the bit
   offset where it ends.  */
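/* Worked example (illustrative only): in SImode the mask 0x000ff000 is a
   single run of ones covering bits 12..19, so this should succeed with
   *e = 12 and *b = 19, while 0x00ff00ff contains two separate runs of ones
   and should be rejected.  */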
bool
rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
{
  unsigned HOST_WIDE_INT val = INTVAL (mask);
  unsigned HOST_WIDE_INT bit;
  int nb, ne;
  int n = GET_MODE_PRECISION (mode);

  if (mode != DImode && mode != SImode)
    return false;

  if (INTVAL (mask) >= 0)
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      nb = exact_log2 (val + bit);
    }
  else if (val + 1 == 0)
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)
    {
      val = ~val;
      bit = val & -val;
      nb = exact_log2 (bit);
      ne = exact_log2 (val + bit);
    }
  else
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      if (val + bit == 0)
	nb = n;
      else
	nb = 0;
    }

  nb--;

  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return false;

  if (b)
    *b = nb;
  if (e)
    *e = ne;

  return true;
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */

bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */
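/* Illustrative only: with a DImode mask of 0xff (ne = 0, nb = 7 per
   rs6000_is_valid_mask), the first branch below applies and the template
   comes out as "rldicl %0,%1,0,56", i.e. clear everything above the low
   8 bits.  */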
const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
   rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
   shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);
  int sh = -1;

  if (CONST_INT_P (XEXP (shift, 1)))
    {
      sh = INTVAL (XEXP (shift, 1));
      if (sh < 0 || sh >= n)
	return false;
    }

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rld*.  */
  if (mode == DImode && code == ROTATE)
    return (nb == 63 || ne == 0 || ne == sh);

  /* SImode rotates need rlw*.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Variable shifts are only okay for rotates.  */
  if (sh < 0)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlw*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rld*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;

  if (nb == 63 || ne == 0 || ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for a shift with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (64 - INTVAL (operands[2]));
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2cl. %0,%1,%2,%3";
      return "rld%I2cl %0,%1,%2,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rld%I2cr. %0,%1,%2,%3";
      return "rld%I2cr %0,%1,%2,%3";
    }

  if (mode == DImode
      && GET_CODE (operands[4]) != LSHIFTRT
      && CONST_INT_P (operands[2])
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2c. %0,%1,%2,%3";
      return "rld%I2c %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      /* This insn can also be a 64-bit rotate with mask that really makes
	 it just a shift right (with mask); the %h below are to adjust for
	 that situation (shift count is >= 32 in that case).  */
      if (dot)
	return "rlw%I2nm. %0,%1,%h2,%3,%4";
      return "rlw%I2nm %0,%1,%h2,%3,%4";
    }

  gcc_unreachable ();
}
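/* Illustrative only: for a DImode ASHIFT by 8 under the mask
   0xffffffffffffff00 (so ne = 8, nb = 63), the nb == 63 branch above fires,
   operands[3] becomes 63 - 8 = 55, and the non-dot template prints as
   "rldicr %0,%1,8,55", which is exactly (x << 8) with the low 8 bits
   cleared.  */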
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
   rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
   ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);

  int sh = INTVAL (XEXP (shift, 1));
  if (sh < 0 || sh >= n)
    return false;

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rldimi.  */
  if (mode == DImode && code == ROTATE)
    return (ne == sh);

  /* SImode rotates need rlwimi.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlwimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rldimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;

  if (ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for an insert with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  /* Prefer rldimi because rlwimi is cracked.  */
  if (TARGET_POWERPC64
      && (!dot || mode == DImode)
      && GET_CODE (operands[4]) != LSHIFTRT
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldimi. %0,%1,%2,%3";
      return "rldimi %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwimi. %0,%1,%2,%3,%4";
      return "rlwimi %0,%1,%2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */

bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
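  /* Worked example (illustrative numbers only): for val = 0xff00ff00,
     bit1 = 0x100 (lowest set bit), bit2 = 0x10000 (lowest clear bit above
     it), val1 = 0xff000000 and bit3 = 0x01000000, so the candidate mask
     val + bit3 - bit2 = 0xffffff00 is one contiguous run of ones and the
     AND can indeed be split into two rl* instructions.  */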
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
18976 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18977 If EXPAND is true, split rotate-and-mask instructions we generate to
18978 their constituent parts as well (this is used during expand); if DOT
18979 is 1, make the last insn a record-form instruction clobbering the
18980 destination GPR and setting the CC reg (from operands[3]); if 2, set
18981 that GPR as well as the CC reg. */
18984 rs6000_emit_2insn_and (machine_mode mode
, rtx
*operands
, bool expand
, int dot
)
18986 gcc_assert (!(expand
&& dot
));
18988 unsigned HOST_WIDE_INT val
= INTVAL (operands
[2]);
18990 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18991 shift right. This generates better code than doing the masks without
18992 shifts, or shifting first right and then left. */
18994 if (rs6000_is_valid_mask (operands
[2], &nb
, &ne
, mode
) && nb
>= ne
)
18996 gcc_assert (mode
== DImode
);
18998 int shift
= 63 - nb
;
19001 rtx tmp1
= gen_reg_rtx (DImode
);
19002 rtx tmp2
= gen_reg_rtx (DImode
);
19003 emit_insn (gen_ashldi3 (tmp1
, operands
[1], GEN_INT (shift
)));
19004 emit_insn (gen_anddi3 (tmp2
, tmp1
, GEN_INT (val
<< shift
)));
19005 emit_insn (gen_lshrdi3 (operands
[0], tmp2
, GEN_INT (shift
)));
19009 rtx tmp
= gen_rtx_ASHIFT (mode
, operands
[1], GEN_INT (shift
));
19010 tmp
= gen_rtx_AND (mode
, tmp
, GEN_INT (val
<< shift
));
19011 emit_move_insn (operands
[0], tmp
);
19012 tmp
= gen_rtx_LSHIFTRT (mode
, operands
[0], GEN_INT (shift
));
19013 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
19018 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19019 that does the rest. */
19020 unsigned HOST_WIDE_INT bit1
= val
& -val
;
19021 unsigned HOST_WIDE_INT bit2
= (val
+ bit1
) & ~val
;
19022 unsigned HOST_WIDE_INT val1
= (val
+ bit1
) & val
;
19023 unsigned HOST_WIDE_INT bit3
= val1
& -val1
;
19025 unsigned HOST_WIDE_INT mask1
= -bit3
+ bit2
- 1;
19026 unsigned HOST_WIDE_INT mask2
= val
+ bit3
- bit2
;
19028 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2
), mode
));
19030 /* Two "no-rotate"-and-mask instructions, for SImode. */
19031 if (rs6000_is_valid_and_mask (GEN_INT (mask1
), mode
))
19033 gcc_assert (mode
== SImode
);
19035 rtx reg
= expand
? gen_reg_rtx (mode
) : operands
[0];
19036 rtx tmp
= gen_rtx_AND (mode
, operands
[1], GEN_INT (mask1
));
19037 emit_move_insn (reg
, tmp
);
19038 tmp
= gen_rtx_AND (mode
, reg
, GEN_INT (mask2
));
19039 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
19043 gcc_assert (mode
== DImode
);
19045 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19046 insns; we have to do the first in SImode, because it wraps. */
19047 if (mask2
<= 0xffffffff
19048 && rs6000_is_valid_and_mask (GEN_INT (mask1
), SImode
))
19050 rtx reg
= expand
? gen_reg_rtx (mode
) : operands
[0];
19051 rtx tmp
= gen_rtx_AND (SImode
, gen_lowpart (SImode
, operands
[1]),
19053 rtx reg_low
= gen_lowpart (SImode
, reg
);
19054 emit_move_insn (reg_low
, tmp
);
19055 tmp
= gen_rtx_AND (mode
, reg
, GEN_INT (mask2
));
19056 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
19060 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19061 at the top end), rotate back and clear the other hole. */
19062 int right
= exact_log2 (bit3
);
19063 int left
= 64 - right
;
19065 /* Rotate the mask too. */
19066 mask1
= (mask1
>> right
) | ((bit2
- 1) << left
);
19070 rtx tmp1
= gen_reg_rtx (DImode
);
19071 rtx tmp2
= gen_reg_rtx (DImode
);
19072 rtx tmp3
= gen_reg_rtx (DImode
);
19073 emit_insn (gen_rotldi3 (tmp1
, operands
[1], GEN_INT (left
)));
19074 emit_insn (gen_anddi3 (tmp2
, tmp1
, GEN_INT (mask1
)));
19075 emit_insn (gen_rotldi3 (tmp3
, tmp2
, GEN_INT (right
)));
19076 emit_insn (gen_anddi3 (operands
[0], tmp3
, GEN_INT (mask2
)));
19080 rtx tmp
= gen_rtx_ROTATE (mode
, operands
[1], GEN_INT (left
));
19081 tmp
= gen_rtx_AND (mode
, tmp
, GEN_INT (mask1
));
19082 emit_move_insn (operands
[0], tmp
);
19083 tmp
= gen_rtx_ROTATE (mode
, operands
[0], GEN_INT (right
));
19084 tmp
= gen_rtx_AND (mode
, tmp
, GEN_INT (mask2
));
19085 rs6000_emit_dot_insn (operands
[0], tmp
, dot
, dot
? operands
[3] : 0);
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
19107 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19108 addr1 and addr2 must be in consecutive memory locations
19109 (addr2 == addr1 + 8). */
19112 mems_ok_for_quad_peep (rtx mem1
, rtx mem2
)
19115 unsigned int reg1
, reg2
;
19116 int offset1
, offset2
;
19118 /* The mems cannot be volatile. */
19119 if (MEM_VOLATILE_P (mem1
) || MEM_VOLATILE_P (mem2
))
19122 addr1
= XEXP (mem1
, 0);
19123 addr2
= XEXP (mem2
, 0);
19125 /* Extract an offset (if used) from the first addr. */
19126 if (GET_CODE (addr1
) == PLUS
)
19128 /* If not a REG, return zero. */
19129 if (GET_CODE (XEXP (addr1
, 0)) != REG
)
19133 reg1
= REGNO (XEXP (addr1
, 0));
19134 /* The offset must be constant! */
19135 if (GET_CODE (XEXP (addr1
, 1)) != CONST_INT
)
19137 offset1
= INTVAL (XEXP (addr1
, 1));
19140 else if (GET_CODE (addr1
) != REG
)
19144 reg1
= REGNO (addr1
);
19145 /* This was a simple (mem (reg)) expression. Offset is 0. */
19149 /* And now for the second addr. */
19150 if (GET_CODE (addr2
) == PLUS
)
19152 /* If not a REG, return zero. */
19153 if (GET_CODE (XEXP (addr2
, 0)) != REG
)
19157 reg2
= REGNO (XEXP (addr2
, 0));
19158 /* The offset must be constant. */
19159 if (GET_CODE (XEXP (addr2
, 1)) != CONST_INT
)
19161 offset2
= INTVAL (XEXP (addr2
, 1));
19164 else if (GET_CODE (addr2
) != REG
)
19168 reg2
= REGNO (addr2
);
19169 /* This was a simple (mem (reg)) expression. Offset is 0. */
19173 /* Both of these must have the same base register. */
19177 /* The offset for the second addr must be 8 more than the first addr. */
19178 if (offset2
!= offset1
+ 8)
19181 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
/* Return the mode to be used for memory when a secondary memory
   location is needed.  For SDmode values we need to use DDmode, in
   all other cases we can use the same mode.  */

machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;

  return mode;
}
19197 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19198 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19199 only work on the traditional altivec registers, note if an altivec register
19202 static enum rs6000_reg_type
19203 register_to_reg_type (rtx reg
, bool *is_altivec
)
19205 HOST_WIDE_INT regno
;
19206 enum reg_class rclass
;
19208 if (GET_CODE (reg
) == SUBREG
)
19209 reg
= SUBREG_REG (reg
);
19212 return NO_REG_TYPE
;
19214 regno
= REGNO (reg
);
19215 if (regno
>= FIRST_PSEUDO_REGISTER
)
19217 if (!lra_in_progress
&& !reload_completed
)
19218 return PSEUDO_REG_TYPE
;
19220 regno
= true_regnum (reg
);
19221 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
)
19222 return PSEUDO_REG_TYPE
;
19225 gcc_assert (regno
>= 0);
19227 if (is_altivec
&& ALTIVEC_REGNO_P (regno
))
19228 *is_altivec
= true;
19230 rclass
= rs6000_regno_regclass
[regno
];
19231 return reg_class_to_reg_type
[(int)rclass
];
19234 /* Helper function to return the cost of adding a TOC entry address. */
19237 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask
)
19241 if (TARGET_CMODEL
!= CMODEL_SMALL
)
19242 ret
= ((addr_mask
& RELOAD_REG_OFFSET
) == 0) ? 1 : 2;
19245 ret
= (TARGET_MINIMAL_TOC
) ? 6 : 3;
19250 /* Helper function for rs6000_secondary_reload to determine whether the memory
19251 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19252 needs reloading. Return negative if the memory is not handled by the memory
19253 helper functions and to try a different reload method, 0 if no additional
19254 instructions are need, and positive to give the extra cost for the
19258 rs6000_secondary_reload_memory (rtx addr
,
19259 enum reg_class rclass
,
19262 int extra_cost
= 0;
19263 rtx reg
, and_arg
, plus_arg0
, plus_arg1
;
19264 addr_mask_type addr_mask
;
19265 const char *type
= NULL
;
19266 const char *fail_msg
= NULL
;
19268 if (GPR_REG_CLASS_P (rclass
))
19269 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_GPR
];
19271 else if (rclass
== FLOAT_REGS
)
19272 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_FPR
];
19274 else if (rclass
== ALTIVEC_REGS
)
19275 addr_mask
= reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
];
19277 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19278 else if (rclass
== VSX_REGS
)
19279 addr_mask
= (reg_addr
[mode
].addr_mask
[RELOAD_REG_VMX
]
19280 & ~RELOAD_REG_AND_M16
);
19282 /* If the register allocator hasn't made up its mind yet on the register
19283 class to use, settle on defaults to use. */
19284 else if (rclass
== NO_REGS
)
19286 addr_mask
= (reg_addr
[mode
].addr_mask
[RELOAD_REG_ANY
]
19287 & ~RELOAD_REG_AND_M16
);
19289 if ((addr_mask
& RELOAD_REG_MULTIPLE
) != 0)
19290 addr_mask
&= ~(RELOAD_REG_INDEXED
19291 | RELOAD_REG_PRE_INCDEC
19292 | RELOAD_REG_PRE_MODIFY
);
  /* If the register isn't valid in this register class, just return now.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    {
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr,
		 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		 "not valid in class\n",
		 GET_MODE_NAME (mode), reg_class_names[rclass]);

      return -1;
    }
  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  We
	 don't need a scratch register, since the powerpc only supports
	 PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      reg = XEXP (addr, 0);
      if (!base_reg_operand (addr, GET_MODE (reg)))
	{
	  fail_msg = "no base register #1";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

    case PRE_MODIFY:
      reg = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);
      if (!base_reg_operand (reg, GET_MODE (reg))
	  || GET_CODE (plus_arg1) != PLUS
	  || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
	{
	  fail_msg = "bad PRE_MODIFY";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  Only allow the AND for vector sizes.  */
    case AND:
      and_arg = XEXP (addr, 0);
      if (GET_MODE_SIZE (mode) != 16
	  || GET_CODE (XEXP (addr, 1)) != CONST_INT
	  || INTVAL (XEXP (addr, 1)) != -16)
	{
	  fail_msg = "bad Altivec AND #1";
	  extra_cost = -1;
	}

      if (rclass != ALTIVEC_REGS)
	{
	  if (legitimate_indirect_address_p (and_arg, false))
	    extra_cost = 1;

	  else if (legitimate_indexed_address_p (and_arg, false))
	    extra_cost = 1;

	  else
	    {
	      fail_msg = "bad Altivec AND #2";
	      extra_cost = -1;
	    }
	}
      break;
      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!legitimate_indirect_address_p (addr, false))
	{
	  extra_cost = 1;
	  type = "move";
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      plus_arg0 = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);

      /* (plus (plus (reg) (constant)) (constant)) is generated during
	 push_reload processing, so handle it now.  */
      if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset";
	    }
	}

      /* (plus (plus (reg) (constant)) (reg)) is also generated during
	 push_reload processing, so handle it now.  */
      else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      extra_cost = 1;
	      type = "indexed #2";
	    }
	}

      else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
	{
	  fail_msg = "no base register #2";
	  extra_cost = -1;
	}

      else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0
	      || !legitimate_indexed_address_p (addr, false))
	    {
	      extra_cost = 1;
	      type = "indexed";
	    }
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
	       && CONST_INT_P (plus_arg1))
	{
	  if (!quad_address_offset_p (INTVAL (plus_arg1)))
	    {
	      extra_cost = 1;
	      type = "vector d-form offset";
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset #2";
	    }
	}

      else
	{
	  fail_msg = "bad PLUS";
	  extra_cost = -1;
	}

      break;
    case LO_SUM:
      /* Quad offsets are restricted and can't handle normal addresses.  */
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum";
	}

      else if (!legitimate_lo_sum_address_p (mode, addr, false))
	{
	  fail_msg = "bad LO_SUM";
	  extra_cost = -1;
	}

      if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "lo_sum";
	}
      break;

      /* Static addresses need to create a TOC entry.  */
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #2";
	}

      else
	extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
      break;

      /* TOC references look like offsetable memory.  */
    case UNSPEC:
      if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
	{
	  fail_msg = "bad UNSPEC";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #3";
	}

      else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "toc reference";
	}
      break;

    default:
      fail_msg = "bad address";
      extra_cost = -1;
      break;
    }

  if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
    {
      if (extra_cost < 0)
	fprintf (stderr,
		 "rs6000_secondary_reload_memory error: mode = %s, "
		 "class = %s, addr_mask = '%s', %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 (fail_msg != NULL) ? fail_msg : "<bad address>");

      else
	fprintf (stderr,
		 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		 "addr_mask = '%s', extra cost = %d, %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 extra_cost,
		 (type) ? type : "<none>");
    }

  return extra_cost;
}
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */

static bool
rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode)
{
  int size = GET_MODE_SIZE (mode);

  /* Add support for various direct moves available.  In this function, we only
     look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  Originally small integers are not allowed
     in FPR/VSX registers.  Single precision binary floating is not a simple
     move because we need to convert to the single precision memory layout.
     The 4-byte SDmode can be moved.  TDmode values are disallowed since they
     need special direct move handling, which we do not support yet.  */
  if (TARGET_DIRECT_MOVE
      && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	  || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
    {
      if (TARGET_POWERPC64)
	{
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
	  if (size == 8)
	    return true;

	  /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD.  */
	  if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (TARGET_P8_VECTOR)
	{
	  if (mode == SImode)
	    return true;

	  if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (mode == SDmode)
	return true;
    }

  /* Power6+: MFTGPR or MFFGPR.  */
  else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
	   && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
	       || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  /* Move to/from SPR.  */
  else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
	   && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
	       || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  return false;
}
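/* Illustrative sketch (assumption, not from the original sources): on a
   64-bit ISA 2.07 target a DImode copy between a GPR and a VSX register is a
   single mtvsrd/mfvsrd, so no scratch register or secondary reload is needed:

     bool simple = rs6000_secondary_reload_simple_move (VSX_REG_TYPE,
							GPR_REG_TYPE, DImode);
     // simple is true when TARGET_DIRECT_MOVE && TARGET_POWERPC64.

   The call merely restates the first branch of the function above.  */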
/* Direct move helper function for rs6000_secondary_reload, handle all of the
   special direct moves that involve allocating an extra register, return the
   insn code of the helper function if there is such a function or
   CODE_FOR_nothing if not.  */

static bool
rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode,
				     secondary_reload_info *sri,
				     bool altivec_p)
{
  bool ret = false;
  enum insn_code icode = CODE_FOR_nothing;
  int cost = 0;
  int size = GET_MODE_SIZE (mode);

  if (TARGET_POWERPC64 && size == 16)
    {
      /* Handle moving 128-bit values from GPRs to VSX point registers on
	 ISA 2.07 (power8, power9) when running in 64-bit mode using
	 XXPERMDI to glue the two 64-bit values back together.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 3;			/* 2 mtvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}

      /* Handle moving 128-bit values from VSX point registers to GPRs on
	 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to
	 the bottom 64-bit value.  */
      else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* 2 mfvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}
    }

  else if (TARGET_POWERPC64 && mode == SFmode)
    {
      if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* xscvdpspn, mfvsrd, and.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}

      else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 2;			/* mtvsrz, xscvspdpn.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}
    }

  else if (!TARGET_POWERPC64 && size == 8)
    {
      /* Handle moving 64-bit values from GPRs to floating point registers on
	 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
	 32-bit values back together.  Altivec register classes must be handled
	 specially since a different instruction is used, and the secondary
	 reload support requires a single instruction class in the scratch
	 register constraint.  However, right now TFmode is not allowed in
	 Altivec registers, so the pattern will never match.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
	{
	  cost = 3;			/* 2 mtvsrwz's, 1 fmrgow.  */
	  icode = reg_addr[mode].reload_fpr_gpr;
	}
    }

  if (icode != CODE_FOR_nothing)
    {
      ret = true;
      if (sri)
	{
	  sri->icode = icode;
	  sri->extra_cost = cost;
	}
    }

  return ret;
}
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */

static bool
rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
			      enum rs6000_reg_type from_type,
			      machine_mode mode,
			      secondary_reload_info *sri,
			      bool altivec_p)
{
  /* Fall back to load/store reloads if either type is not a register.  */
  if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
    return false;

  /* If we haven't allocated registers yet, assume the move can be done for the
     standard register types.  */
  if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
      || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
      || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
    return true;

  /* Moves to the same set of registers are a simple move for non-specialized
     registers.  */
  if (to_type == from_type && IS_STD_REG_TYPE (to_type))
    return true;

  /* Check whether a simple move can be done directly.  */
  if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
    {
      if (sri)
	{
	  sri->icode = CODE_FOR_nothing;
	  sri->extra_cost = 0;
	}
      return true;
    }

  /* Now check if we can do it in a few steps.  */
  return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
					      altivec_p);
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;
  bool done_p = false;

  /* Allow subreg of memory before/during reload.  */
  bool memory_p = (MEM_P (x)
		   || (!reload_completed && GET_CODE (x) == SUBREG
		       && MEM_P (SUBREG_REG (x))));

  sri->icode = CODE_FOR_nothing;
  sri->t_icode = CODE_FOR_nothing;
  sri->extra_cost = 0;
  icode = ((in_p)
	   ? reg_addr[mode].reload_load
	   : reg_addr[mode].reload_store);

  if (REG_P (x) || register_operand (x, mode))
    {
      enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
      bool altivec_p = (rclass == ALTIVEC_REGS);
      enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);

      if (!in_p)
	std::swap (to_type, from_type);

      /* Can we do a direct move of some sort?  */
      if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
					altivec_p))
	{
	  icode = (enum insn_code)sri->icode;
	  default_p = false;
	  done_p = true;
	  ret = NO_REGS;
	}
    }

  /* Make sure 0.0 is not reloaded or forced into memory.  */
  if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    {
      ret = NO_REGS;
      default_p = false;
      done_p = true;
    }
  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, do it via a move via a traditional floating
     point register, unless we have D-form addressing.  Also make sure that
     non-zero constants use a FPR.  */
  if (!done_p && reg_addr[mode].scalar_in_vmx_p
      && !mode_supports_vmx_dform (mode)
      && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
      && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
    {
      ret = FLOAT_REGS;
      default_p = false;
      done_p = true;
    }

  /* Handle reload of load/stores if we have reload helper functions.  */
  if (!done_p && icode != CODE_FOR_nothing && memory_p)
    {
      int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
						       GET_MODE (x));

      if (extra_cost >= 0)
	{
	  done_p = true;
	  ret = NO_REGS;
	  if (extra_cost > 0)
	    {
	      sri->extra_cost = extra_cost;
	      sri->icode = icode;
	    }
	}
    }

  /* Handle unaligned loads and stores of integer registers.  */
  if (!done_p && TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and the offset is not a multiple of
	     four or we have an address wrap.  Address wrap will only
	     occur for LO_SUMs since legitimate_offset_address_p
	     rejects addresses for 16-byte mems that will wrap.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? (1 /* legitimate_address_p allows any offset for lo_sum */
		 && ((offset & 3) != 0
		     || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
	      : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
		 && (offset & 3) != 0))
	    {
	      /* -m32 -mpowerpc64 needs to use a 32-bit scratch register.  */
	      if (in_p)
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
			      : CODE_FOR_reload_di_load);
	      else
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
			      : CODE_FOR_reload_di_store);
	      sri->extra_cost = 2;
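/* Worked example (illustration only, not from the original sources): for a
   DImode access on a 64-bit target, extra == 0.  An offset of 0x7ffa passes
   legitimate_offset_address_p (0x7ffa + 0x8000 < 0x10000) but is not a
   multiple of 4 (0x7ffa & 3 == 2), so the access above gets the
   CODE_FOR_reload_di_* secondary reload; an aligned offset such as 0x7ff8
   would need no fixup.  The concrete numbers only restate the condition
   directly above.  */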
	    }
	}
    }

  if (!done_p && !TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and we have a wrap.

	     legitimate_lo_sum_address_p allows LO_SUM addresses to
	     have any offset so test for wrap in the low 16 bits.

	     legitimate_offset_address_p checks for the range
	     [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
	     for mode size of 16.  We wrap at [0x7ffc,0x7fff] and
	     [0x7ff4,0x7fff] respectively, so test for the
	     intersection of these ranges, [0x7ffc,0x7fff] and
	     [0x7ff4,0x7ff7] respectively.

	     Note that the address we see here may have been
	     manipulated by legitimize_reload_address.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
	      : offset - (0x8000 - extra) < UNITS_PER_WORD)
	    {
	      if (in_p)
		sri->icode = CODE_FOR_reload_si_load;
	      else
		sri->icode = CODE_FOR_reload_si_store;
	      sri->extra_cost = 2;
	    }
	}
    }

  if (!done_p)
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (reload_completed)
	fputs (", after reload", stderr);

      if (!done_p)
	fputs (", done_p not set", stderr);

      if (default_p)
	fputs (", default secondary reload", stderr);

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d",
		 insn_data[sri->icode].name, sri->extra_cost);

      else if (sri->extra_cost > 0)
	fprintf (stderr, ", extra cost = %d", sri->extra_cost);

      fputs ("\n", stderr);
    }

  return ret;
}
/* Better tracing for rs6000_secondary_reload_inner.  */

static void
rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
			       bool store_p)
{
  rtx set, clobber;

  gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);

  fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
	   store_p ? "store" : "load");

  if (store_p)
    set = gen_rtx_SET (mem, reg);
  else
    set = gen_rtx_SET (reg, mem);

  clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
  debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
}

static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
  ATTRIBUTE_NORETURN;

static void
rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
			      bool store_p)
{
  rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
  gcc_unreachable ();
}
/* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
   reload helper functions.  These were identified in
   rs6000_secondary_reload_memory, and if reload decided to use the secondary
   reload, it calls the insns:
	reload_<RELOAD:mode>_<P:mptrsize>_store
	reload_<RELOAD:mode>_<P:mptrsize>_load

   which in turn calls this function, to do whatever is necessary to create
   valid addresses.  */

void
rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  machine_mode mode = GET_MODE (reg);
  addr_mask_type addr_mask;
  rtx addr, new_addr;
  rtx op_reg, op0, op1;
  rtx and_op, cc_clobber;
  rtvec rv;

  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
      || !base_reg_operand (scratch, GET_MODE (scratch)))
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  else
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  /* Make sure the mode is valid in this register class.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (TARGET_DEBUG_ADDR)
    rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);

  new_addr = addr = XEXP (mem, 0);
  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  If
	 not, do the update now.  We don't need a scratch register, since the
	 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      op_reg = XEXP (addr, 0);
      if (!base_reg_operand (op_reg, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
	  new_addr = op_reg;
	}
      break;

    case PRE_MODIFY:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode)
	  || GET_CODE (op1) != PLUS
	  || !rtx_equal_p (op0, XEXP (op1, 0)))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  emit_insn (gen_rtx_SET (op0, op1));
	  new_addr = op0;
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  */
    case AND:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if ((addr_mask & RELOAD_REG_AND_M16) == 0)
	{
	  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
	    op_reg = op0;

	  else if (GET_CODE (op1) == PLUS)
	    {
	      emit_insn (gen_rtx_SET (scratch, op1));
	      op_reg = scratch;
	    }

	  else
	    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch,
					  store_p);

	  and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
	  cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
	  rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
	  emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
	  new_addr = scratch;
	}
      break;
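/* Illustrative note (assumption, not from the original sources): for a class
   that cannot use the implicit Altivec AND -16, the block above materializes
   the masking explicitly, emitting something roughly equivalent to

     (parallel [(set (reg scratch) (and (reg base) (const_int -16)))
		(clobber (scratch:CC))])

   and then using the scratch register as the new base address.  The register
   names here are placeholders for whatever reload passed in.  */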
      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!base_reg_operand (addr, GET_MODE (addr)))
	{
	  emit_insn (gen_rtx_SET (scratch, addr));
	  new_addr = scratch;
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
	{
	  if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
	      || !quad_address_p (addr, mode, false))
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case LO_SUM:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      /* Quad offsets are restricted and can't handle normal addresses.  */
      else if (mode_supports_vsx_dform_quad (mode))
	{
	  emit_insn (gen_rtx_SET (scratch, addr));
	  new_addr = scratch;
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (legitimate_lo_sum_address_p (mode, addr, false))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      rs6000_emit_move (scratch, addr, Pmode);
      new_addr = scratch;
      break;

    default:
      rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
    }

  /* Adjust the address if it changed.  */
  if (addr != new_addr)
    {
      mem = replace_equiv_address_nv (mem, new_addr);
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));
}
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      gcc_assert (REG_P (XEXP (addr, 0))
		  && GET_CODE (XEXP (addr, 1)) == PLUS
		  && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
      scratch_or_premodify = XEXP (addr, 0);
      if (!HARD_REGISTER_P (scratch_or_premodify))
	/* If we have a pseudo here then reload will have arranged
	   to have it replaced, but only in the original insn.
	   Use the replacement here too.  */
	scratch_or_premodify = find_replacement (&XEXP (addr, 0));

      /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
	 expressions from the original insn, without unsharing them.
	 Any RTL that points into the original insn will of course
	 have register replacements applied.  That is why we don't
	 need to look for replacements under the PLUS.  */
      addr = XEXP (addr, 1);
    }

  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?  */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  machine_mode mode = GET_MODE (x);
  bool is_constant = CONSTANT_P (x);

  /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
     reload class for it.  */
  if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS.  Do not allow
     the reloading of address expressions using PLUS into floating point
     registers.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
    {
      if (is_constant)
	{
	  /* Zero is always allowed in all VSX registers.  */
	  if (x == CONST0_RTX (mode))
	    return rclass;

	  /* If this is a vector constant that can be formed with a few Altivec
	     instructions, we want altivec registers.  */
	  if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
	    return ALTIVEC_REGS;

	  /* If this is an integer constant that can easily be loaded into
	     vector registers, allow it.  */
	  if (CONST_INT_P (x))
	    {
	      HOST_WIDE_INT value = INTVAL (x);

	      /* ISA 2.07 can generate -1 in all registers with XXLORC.  ISA
		 2.06 can generate it in the Altivec registers with
		 VSPLTI<x>.  */
	      if (value == -1)
		{
		  if (TARGET_P8_VECTOR)
		    return rclass;
		  else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
		    return ALTIVEC_REGS;
		}

	      /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
		 a sign extend in the Altivec registers.  */
	      if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
		  && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
		return ALTIVEC_REGS;
	    }

	  /* Force constant to memory.  */
	  return NO_REGS;
	}

      /* D-form addressing can easily reload the value.  */
      if (mode_supports_vmx_dform (mode)
	  || mode_supports_vsx_dform_quad (mode))
	return rclass;

      /* If this is a scalar floating point value and we don't have D-form
	 addressing, prefer the traditional floating point registers so that we
	 can use D-form (register+offset) addressing.  */
      if (rclass == VSX_REGS
	  && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
	return FLOAT_REGS;

      /* Prefer the Altivec registers if Altivec is handling the vector
	 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
	 loads.  */
      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
	  || mode == V1TImode)
	return ALTIVEC_REGS;

      return rclass;
    }

  if (is_constant || GET_CODE (x) == PLUS)
    {
      if (reg_class_subset_p (GENERAL_REGS, rclass))
	return GENERAL_REGS;
      if (reg_class_subset_p (BASE_REGS, rclass))
	return BASE_REGS;
      return NO_REGS;
    }

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  return rclass;
}
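/* Illustrative trace (assumption, not from the original sources) of the
   constant handling above for rclass == VSX_REGS on an ISA 3.0 target: a
   vector of zeros stays in VSX_REGS, the integer -1 stays in VSX_REGS
   (XXLORC), a small integer in [-128,127] is steered to ALTIVEC_REGS
   (XXSPLTIB plus a sign extend), and any other integer constant returns
   NO_REGS so that it is forced to memory.  */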
/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (enum reg_class from_class,
				enum reg_class to_class,
				machine_mode mode)
{
  enum rs6000_reg_type from_type, to_type;
  bool altivec_p = ((from_class == ALTIVEC_REGS)
		    || (to_class == ALTIVEC_REGS));

  /* If a simple/direct move is available, we don't need secondary memory  */
  from_type = reg_class_to_reg_type[(int)from_class];
  to_type = reg_class_to_reg_type[(int)to_class];

  if (rs6000_secondary_reload_move (to_type, from_type, mode,
				    (secondary_reload_info *)0, altivec_p))
    return false;

  /* If we have a floating point or vector register class, we need to use
     memory to transfer the data.  */
  if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
    return true;

  return false;
}

/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (enum reg_class from_class,
				      enum reg_class to_class,
				      machine_mode mode)
{
  bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
	   "to_class = %s, mode = %s\n",
	   ret ? "true" : "false",
	   reg_class_names[from_class],
	   reg_class_names[to_class],
	   GET_MODE_NAME (mode));

  return ret;
}
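/* Worked example (illustration only): on a 32-bit target without the
   mtvsrd/mfvsrd direct moves, copying a DFmode value between GENERAL_REGS and
   FLOAT_REGS has no simple move, and FLOAT_REGS is an FP/vector type, so
   rs6000_secondary_memory_needed returns true and the value is bounced
   through a stack slot; with -mcpu=power8 in 64-bit mode the direct-move
   check succeeds first and no memory is needed.  The scenario is an
   assumption used only to restate the two tests above.  */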
20477 /* Return the register class of a scratch register needed to copy IN into
20478 or out of a register in RCLASS in MODE. If it can be done directly,
20479 NO_REGS is returned. */
20481 static enum reg_class
20482 rs6000_secondary_reload_class (enum reg_class rclass
, machine_mode mode
,
20487 if (TARGET_ELF
|| (DEFAULT_ABI
== ABI_DARWIN
20489 && MACHOPIC_INDIRECT
20493 /* We cannot copy a symbolic operand directly into anything
20494 other than BASE_REGS for TARGET_ELF. So indicate that a
20495 register from BASE_REGS is needed as an intermediate
20498 On Darwin, pic addresses require a load from memory, which
20499 needs a base register. */
20500 if (rclass
!= BASE_REGS
20501 && (GET_CODE (in
) == SYMBOL_REF
20502 || GET_CODE (in
) == HIGH
20503 || GET_CODE (in
) == LABEL_REF
20504 || GET_CODE (in
) == CONST
))
20508 if (GET_CODE (in
) == REG
)
20510 regno
= REGNO (in
);
20511 if (regno
>= FIRST_PSEUDO_REGISTER
)
20513 regno
= true_regnum (in
);
20514 if (regno
>= FIRST_PSEUDO_REGISTER
)
20518 else if (GET_CODE (in
) == SUBREG
)
20520 regno
= true_regnum (in
);
20521 if (regno
>= FIRST_PSEUDO_REGISTER
)
20527 /* If we have VSX register moves, prefer moving scalar values between
20528 Altivec registers and GPR by going via an FPR (and then via memory)
20529 instead of reloading the secondary memory address for Altivec moves. */
20531 && GET_MODE_SIZE (mode
) < 16
20532 && !mode_supports_vmx_dform (mode
)
20533 && (((rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
)
20534 && (regno
>= 0 && ALTIVEC_REGNO_P (regno
)))
20535 || ((rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
)
20536 && (regno
>= 0 && INT_REGNO_P (regno
)))))
20539 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20541 if (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
20542 || (regno
>= 0 && INT_REGNO_P (regno
)))
20545 /* Constants, memory, and VSX registers can go into VSX registers (both the
20546 traditional floating point and the altivec registers). */
20547 if (rclass
== VSX_REGS
20548 && (regno
== -1 || VSX_REGNO_P (regno
)))
20551 /* Constants, memory, and FP registers can go into FP registers. */
20552 if ((regno
== -1 || FP_REGNO_P (regno
))
20553 && (rclass
== FLOAT_REGS
|| rclass
== NON_SPECIAL_REGS
))
20554 return (mode
!= SDmode
|| lra_in_progress
) ? NO_REGS
: GENERAL_REGS
;
20556 /* Memory, and AltiVec registers can go into AltiVec registers. */
20557 if ((regno
== -1 || ALTIVEC_REGNO_P (regno
))
20558 && rclass
== ALTIVEC_REGS
)
20561 /* We can copy among the CR registers. */
20562 if ((rclass
== CR_REGS
|| rclass
== CR0_REGS
)
20563 && regno
>= 0 && CR_REGNO_P (regno
))
20566 /* Otherwise, we need GENERAL_REGS. */
20567 return GENERAL_REGS
;
20570 /* Debug version of rs6000_secondary_reload_class. */
20571 static enum reg_class
20572 rs6000_debug_secondary_reload_class (enum reg_class rclass
,
20573 machine_mode mode
, rtx in
)
20575 enum reg_class ret
= rs6000_secondary_reload_class (rclass
, mode
, in
);
20577 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20578 "mode = %s, input rtx:\n",
20579 reg_class_names
[ret
], reg_class_names
[rclass
],
20580 GET_MODE_NAME (mode
));
20586 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20589 rs6000_cannot_change_mode_class (machine_mode from
,
20591 enum reg_class rclass
)
20593 unsigned from_size
= GET_MODE_SIZE (from
);
20594 unsigned to_size
= GET_MODE_SIZE (to
);
20596 if (from_size
!= to_size
)
20598 enum reg_class xclass
= (TARGET_VSX
) ? VSX_REGS
: FLOAT_REGS
;
20600 if (reg_classes_intersect_p (xclass
, rclass
))
20602 unsigned to_nregs
= hard_regno_nregs
[FIRST_FPR_REGNO
][to
];
20603 unsigned from_nregs
= hard_regno_nregs
[FIRST_FPR_REGNO
][from
];
20604 bool to_float128_vector_p
= FLOAT128_VECTOR_P (to
);
20605 bool from_float128_vector_p
= FLOAT128_VECTOR_P (from
);
20607 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20608 single register under VSX because the scalar part of the register
20609 is in the upper 64-bits, and not the lower 64-bits. Types like
20610 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20611 IEEE floating point can't overlap, and neither can small
20614 if (to_float128_vector_p
&& from_float128_vector_p
)
20617 else if (to_float128_vector_p
|| from_float128_vector_p
)
20620 /* TDmode in floating-mode registers must always go into a register
20621 pair with the most significant word in the even-numbered register
20622 to match ISA requirements. In little-endian mode, this does not
20623 match subreg numbering, so we cannot allow subregs. */
20624 if (!BYTES_BIG_ENDIAN
&& (to
== TDmode
|| from
== TDmode
))
20627 if (from_size
< 8 || to_size
< 8)
20630 if (from_size
== 8 && (8 * to_nregs
) != to_size
)
20633 if (to_size
== 8 && (8 * from_nregs
) != from_size
)
20642 /* Since the VSX register set includes traditional floating point registers
20643 and altivec registers, just check for the size being different instead of
20644 trying to check whether the modes are vector modes. Otherwise it won't
20645 allow say DF and DI to change classes. For types like TFmode and TDmode
20646 that take 2 64-bit registers, rather than a single 128-bit register, don't
20647 allow subregs of those types to other 128 bit types. */
20648 if (TARGET_VSX
&& VSX_REG_CLASS_P (rclass
))
20650 unsigned num_regs
= (from_size
+ 15) / 16;
20651 if (hard_regno_nregs
[FIRST_FPR_REGNO
][to
] > num_regs
20652 || hard_regno_nregs
[FIRST_FPR_REGNO
][from
] > num_regs
)
20655 return (from_size
!= 8 && from_size
!= 16);
20658 if (TARGET_ALTIVEC
&& rclass
== ALTIVEC_REGS
20659 && (ALTIVEC_VECTOR_MODE (from
) + ALTIVEC_VECTOR_MODE (to
)) == 1)
20665 /* Debug version of rs6000_cannot_change_mode_class. */
20667 rs6000_debug_cannot_change_mode_class (machine_mode from
,
20669 enum reg_class rclass
)
20671 bool ret
= rs6000_cannot_change_mode_class (from
, to
, rclass
);
20674 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20675 "to = %s, rclass = %s\n",
20676 ret
? "true" : "false",
20677 GET_MODE_NAME (from
), GET_MODE_NAME (to
),
20678 reg_class_names
[rclass
]);
20683 /* Return a string to do a move operation of 128 bits of data. */
20686 rs6000_output_move_128bit (rtx operands
[])
20688 rtx dest
= operands
[0];
20689 rtx src
= operands
[1];
20690 machine_mode mode
= GET_MODE (dest
);
20693 bool dest_gpr_p
, dest_fp_p
, dest_vmx_p
, dest_vsx_p
;
20694 bool src_gpr_p
, src_fp_p
, src_vmx_p
, src_vsx_p
;
20698 dest_regno
= REGNO (dest
);
20699 dest_gpr_p
= INT_REGNO_P (dest_regno
);
20700 dest_fp_p
= FP_REGNO_P (dest_regno
);
20701 dest_vmx_p
= ALTIVEC_REGNO_P (dest_regno
);
20702 dest_vsx_p
= dest_fp_p
| dest_vmx_p
;
20707 dest_gpr_p
= dest_fp_p
= dest_vmx_p
= dest_vsx_p
= false;
20712 src_regno
= REGNO (src
);
20713 src_gpr_p
= INT_REGNO_P (src_regno
);
20714 src_fp_p
= FP_REGNO_P (src_regno
);
20715 src_vmx_p
= ALTIVEC_REGNO_P (src_regno
);
20716 src_vsx_p
= src_fp_p
| src_vmx_p
;
20721 src_gpr_p
= src_fp_p
= src_vmx_p
= src_vsx_p
= false;
20724 /* Register moves. */
20725 if (dest_regno
>= 0 && src_regno
>= 0)
20732 if (TARGET_DIRECT_MOVE_128
&& src_vsx_p
)
20733 return (WORDS_BIG_ENDIAN
20734 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20735 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20737 else if (TARGET_VSX
&& TARGET_DIRECT_MOVE
&& src_vsx_p
)
20741 else if (TARGET_VSX
&& dest_vsx_p
)
20744 return "xxlor %x0,%x1,%x1";
20746 else if (TARGET_DIRECT_MOVE_128
&& src_gpr_p
)
20747 return (WORDS_BIG_ENDIAN
20748 ? "mtvsrdd %x0,%1,%L1"
20749 : "mtvsrdd %x0,%L1,%1");
20751 else if (TARGET_DIRECT_MOVE
&& src_gpr_p
)
20755 else if (TARGET_ALTIVEC
&& dest_vmx_p
&& src_vmx_p
)
20756 return "vor %0,%1,%1";
20758 else if (dest_fp_p
&& src_fp_p
)
20763 else if (dest_regno
>= 0 && MEM_P (src
))
20767 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20773 else if (TARGET_ALTIVEC
&& dest_vmx_p
20774 && altivec_indexed_or_indirect_operand (src
, mode
))
20775 return "lvx %0,%y1";
20777 else if (TARGET_VSX
&& dest_vsx_p
)
20779 if (mode_supports_vsx_dform_quad (mode
)
20780 && quad_address_p (XEXP (src
, 0), mode
, true))
20781 return "lxv %x0,%1";
20783 else if (TARGET_P9_VECTOR
)
20784 return "lxvx %x0,%y1";
20786 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20787 return "lxvw4x %x0,%y1";
20790 return "lxvd2x %x0,%y1";
20793 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
20794 return "lvx %0,%y1";
20796 else if (dest_fp_p
)
20801 else if (src_regno
>= 0 && MEM_P (dest
))
20805 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
20806 return "stq %1,%0";
20811 else if (TARGET_ALTIVEC
&& src_vmx_p
20812 && altivec_indexed_or_indirect_operand (src
, mode
))
20813 return "stvx %1,%y0";
20815 else if (TARGET_VSX
&& src_vsx_p
)
20817 if (mode_supports_vsx_dform_quad (mode
)
20818 && quad_address_p (XEXP (dest
, 0), mode
, true))
20819 return "stxv %x1,%0";
20821 else if (TARGET_P9_VECTOR
)
20822 return "stxvx %x1,%y0";
20824 else if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
20825 return "stxvw4x %x1,%y0";
20828 return "stxvd2x %x1,%y0";
20831 else if (TARGET_ALTIVEC
&& src_vmx_p
)
20832 return "stvx %1,%y0";
20839 else if (dest_regno
>= 0
20840 && (GET_CODE (src
) == CONST_INT
20841 || GET_CODE (src
) == CONST_WIDE_INT
20842 || GET_CODE (src
) == CONST_DOUBLE
20843 || GET_CODE (src
) == CONST_VECTOR
))
20848 else if ((dest_vmx_p
&& TARGET_ALTIVEC
)
20849 || (dest_vsx_p
&& TARGET_VSX
))
20850 return output_vec_const_move (operands
);
20853 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest
, src
));
20856 /* Validate a 128-bit move. */
20858 rs6000_move_128bit_ok_p (rtx operands
[])
20860 machine_mode mode
= GET_MODE (operands
[0]);
20861 return (gpc_reg_operand (operands
[0], mode
)
20862 || gpc_reg_operand (operands
[1], mode
));
20865 /* Return true if a 128-bit move needs to be split. */
20867 rs6000_split_128bit_ok_p (rtx operands
[])
20869 if (!reload_completed
)
20872 if (!gpr_or_gpr_p (operands
[0], operands
[1]))
20875 if (quad_load_store_p (operands
[0], operands
[1]))
20882 /* Given a comparison operation, return the bit number in CCR to test. We
20883 know this is a valid comparison.
20885 SCC_P is 1 if this is for an scc. That means that %D will have been
20886 used instead of %C, so the bits will be in different places.
20888 Return -1 if OP isn't a valid comparison for some reason. */
20891 ccr_bit (rtx op
, int scc_p
)
20893 enum rtx_code code
= GET_CODE (op
);
20894 machine_mode cc_mode
;
20899 if (!COMPARISON_P (op
))
20902 reg
= XEXP (op
, 0);
20904 gcc_assert (GET_CODE (reg
) == REG
&& CR_REGNO_P (REGNO (reg
)));
20906 cc_mode
= GET_MODE (reg
);
20907 cc_regnum
= REGNO (reg
);
20908 base_bit
= 4 * (cc_regnum
- CR0_REGNO
);
20910 validate_condition_mode (code
, cc_mode
);
20912 /* When generating a sCOND operation, only positive conditions are
20915 || code
== EQ
|| code
== GT
|| code
== LT
|| code
== UNORDERED
20916 || code
== GTU
|| code
== LTU
);
20921 return scc_p
? base_bit
+ 3 : base_bit
+ 2;
20923 return base_bit
+ 2;
20924 case GT
: case GTU
: case UNLE
:
20925 return base_bit
+ 1;
20926 case LT
: case LTU
: case UNGE
:
20928 case ORDERED
: case UNORDERED
:
20929 return base_bit
+ 3;
20932 /* If scc, we will have done a cror to put the bit in the
20933 unordered position. So test that bit. For integer, this is ! LT
20934 unless this is an scc insn. */
20935 return scc_p
? base_bit
+ 3 : base_bit
;
20938 return scc_p
? base_bit
+ 3 : base_bit
+ 1;
20941 gcc_unreachable ();
/* Return the GOT register.  */

static rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}
static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_cleared_alloc<machine_function> ();
}
20975 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20977 /* Write out a function code label. */
20980 rs6000_output_function_entry (FILE *file
, const char *fname
)
20982 if (fname
[0] != '.')
20984 switch (DEFAULT_ABI
)
20987 gcc_unreachable ();
20993 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
21003 RS6000_OUTPUT_BASENAME (file
, fname
);
21006 /* Print an operand. Recognize special options, documented below. */
21009 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21010 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21012 #define SMALL_DATA_RELOC "sda21"
21013 #define SMALL_DATA_REG 0
21017 print_operand (FILE *file
, rtx x
, int code
)
21020 unsigned HOST_WIDE_INT uval
;
21024 /* %a is output_address. */
21026 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21030 /* Like 'J' but get to the GT bit only. */
21031 gcc_assert (REG_P (x
));
21033 /* Bit 1 is GT bit. */
21034 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
21036 /* Add one for shift count in rlinm for scc. */
21037 fprintf (file
, "%d", i
+ 1);
21041 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21044 output_operand_lossage ("invalid %%e value");
21049 if ((uval
& 0xffff) == 0 && uval
!= 0)
21054 /* X is a CR register. Print the number of the EQ bit of the CR */
21055 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21056 output_operand_lossage ("invalid %%E value");
21058 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
21062 /* X is a CR register. Print the shift count needed to move it
21063 to the high-order four bits. */
21064 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21065 output_operand_lossage ("invalid %%f value");
21067 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
21071 /* Similar, but print the count for the rotate in the opposite
21073 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21074 output_operand_lossage ("invalid %%F value");
21076 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
21080 /* X is a constant integer. If it is negative, print "m",
21081 otherwise print "z". This is to make an aze or ame insn. */
21082 if (GET_CODE (x
) != CONST_INT
)
21083 output_operand_lossage ("invalid %%G value");
21084 else if (INTVAL (x
) >= 0)
21091 /* If constant, output low-order five bits. Otherwise, write
21094 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
21096 print_operand (file
, x
, 0);
21100 /* If constant, output low-order six bits. Otherwise, write
21103 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
21105 print_operand (file
, x
, 0);
21109 /* Print `i' if this is a constant, else nothing. */
21115 /* Write the bit number in CCR for jump. */
21116 i
= ccr_bit (x
, 0);
21118 output_operand_lossage ("invalid %%j code");
21120 fprintf (file
, "%d", i
);
21124 /* Similar, but add one for shift count in rlinm for scc and pass
21125 scc flag to `ccr_bit'. */
21126 i
= ccr_bit (x
, 1);
21128 output_operand_lossage ("invalid %%J code");
21130 /* If we want bit 31, write a shift count of zero, not 32. */
21131 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21135 /* X must be a constant. Write the 1's complement of the
21138 output_operand_lossage ("invalid %%k value");
21140 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
21144 /* X must be a symbolic constant on ELF. Write an
21145 expression suitable for an 'addi' that adds in the low 16
21146 bits of the MEM. */
21147 if (GET_CODE (x
) == CONST
)
21149 if (GET_CODE (XEXP (x
, 0)) != PLUS
21150 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
21151 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
21152 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
21153 output_operand_lossage ("invalid %%K value");
21155 print_operand_address (file
, x
);
21156 fputs ("@l", file
);
21159 /* %l is output_asm_label. */
21162 /* Write second word of DImode or DFmode reference. Works on register
21163 or non-indexed memory only. */
21165 fputs (reg_names
[REGNO (x
) + 1], file
);
21166 else if (MEM_P (x
))
21168 machine_mode mode
= GET_MODE (x
);
21169 /* Handle possible auto-increment. Since it is pre-increment and
21170 we have already done it, we can just use an offset of word. */
21171 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21172 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21173 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21175 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21176 output_address (mode
, plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
21179 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
,
21183 if (small_data_operand (x
, GET_MODE (x
)))
21184 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21185 reg_names
[SMALL_DATA_REG
]);
21190 /* Write the number of elements in the vector times 4. */
21191 if (GET_CODE (x
) != PARALLEL
)
21192 output_operand_lossage ("invalid %%N value");
21194 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
21198 /* Similar, but subtract 1 first. */
21199 if (GET_CODE (x
) != PARALLEL
)
21200 output_operand_lossage ("invalid %%O value");
21202 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
21206 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21209 || (i
= exact_log2 (INTVAL (x
))) < 0)
21210 output_operand_lossage ("invalid %%p value");
21212 fprintf (file
, "%d", i
);
21216 /* The operand must be an indirect memory reference. The result
21217 is the register name. */
21218 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
21219 || REGNO (XEXP (x
, 0)) >= 32)
21220 output_operand_lossage ("invalid %%P value");
21222 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
21226 /* This outputs the logical code corresponding to a boolean
21227 expression. The expression may have one or both operands
21228 negated (if one, only the first one). For condition register
21229 logical operations, it will also treat the negated
21230 CR codes as NOTs, but not handle NOTs of them. */
21232 const char *const *t
= 0;
21234 enum rtx_code code
= GET_CODE (x
);
21235 static const char * const tbl
[3][3] = {
21236 { "and", "andc", "nor" },
21237 { "or", "orc", "nand" },
21238 { "xor", "eqv", "xor" } };
21242 else if (code
== IOR
)
21244 else if (code
== XOR
)
21247 output_operand_lossage ("invalid %%q value");
21249 if (GET_CODE (XEXP (x
, 0)) != NOT
)
21253 if (GET_CODE (XEXP (x
, 1)) == NOT
)
21264 if (! TARGET_MFCRF
)
21270 /* X is a CR register. Print the mask for `mtcrf'. */
21271 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
21272 output_operand_lossage ("invalid %%R value");
21274 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
21278 /* Low 5 bits of 32 - value */
21280 output_operand_lossage ("invalid %%s value");
21282 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
21286 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21287 gcc_assert (REG_P (x
) && GET_MODE (x
) == CCmode
);
21289 /* Bit 3 is OV bit. */
21290 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
21292 /* If we want bit 31, write a shift count of zero, not 32. */
21293 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
21297 /* Print the symbolic name of a branch target register. */
21298 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
21299 && REGNO (x
) != CTR_REGNO
))
21300 output_operand_lossage ("invalid %%T value");
21301 else if (REGNO (x
) == LR_REGNO
)
21302 fputs ("lr", file
);
21304 fputs ("ctr", file
);
21308 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21309 for use in unsigned operand. */
21312 output_operand_lossage ("invalid %%u value");
21317 if ((uval
& 0xffff) == 0)
21320 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, uval
& 0xffff);
21324 /* High-order 16 bits of constant for use in signed operand. */
21326 output_operand_lossage ("invalid %%v value");
21328 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
21329 (INTVAL (x
) >> 16) & 0xffff);
21333 /* Print `u' if this has an auto-increment or auto-decrement. */
21335 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
21336 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
21337 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
21342 /* Print the trap code for this operand. */
21343 switch (GET_CODE (x
))
21346 fputs ("eq", file
); /* 4 */
21349 fputs ("ne", file
); /* 24 */
21352 fputs ("lt", file
); /* 16 */
21355 fputs ("le", file
); /* 20 */
21358 fputs ("gt", file
); /* 8 */
21361 fputs ("ge", file
); /* 12 */
21364 fputs ("llt", file
); /* 2 */
21367 fputs ("lle", file
); /* 6 */
21370 fputs ("lgt", file
); /* 1 */
21373 fputs ("lge", file
); /* 5 */
21376 gcc_unreachable ();
21381 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21384 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
21385 ((INTVAL (x
) & 0xffff) ^ 0x8000) - 0x8000);
21387 print_operand (file
, x
, 0);
21391 /* X is a FPR or Altivec register used in a VSX context. */
21392 if (GET_CODE (x
) != REG
|| !VSX_REGNO_P (REGNO (x
)))
21393 output_operand_lossage ("invalid %%x value");
21396 int reg
= REGNO (x
);
21397 int vsx_reg
= (FP_REGNO_P (reg
)
21399 : reg
- FIRST_ALTIVEC_REGNO
+ 32);
21401 #ifdef TARGET_REGNAMES
21402 if (TARGET_REGNAMES
)
21403 fprintf (file
, "%%vs%d", vsx_reg
);
21406 fprintf (file
, "%d", vsx_reg
);
21412 && (legitimate_indexed_address_p (XEXP (x
, 0), 0)
21413 || (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
21414 && legitimate_indexed_address_p (XEXP (XEXP (x
, 0), 1), 0))))
21419 /* Like 'L', for third word of TImode/PTImode */
21421 fputs (reg_names
[REGNO (x
) + 2], file
);
21422 else if (MEM_P (x
))
21424 machine_mode mode
= GET_MODE (x
);
21425 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21426 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21427 output_address (mode
, plus_constant (Pmode
,
21428 XEXP (XEXP (x
, 0), 0), 8));
21429 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21430 output_address (mode
, plus_constant (Pmode
,
21431 XEXP (XEXP (x
, 0), 0), 8));
21433 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 8), 0));
21434 if (small_data_operand (x
, GET_MODE (x
)))
21435 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21436 reg_names
[SMALL_DATA_REG
]);
21441 /* X is a SYMBOL_REF. Write out the name preceded by a
21442 period and without any trailing data in brackets. Used for function
21443 names. If we are configured for System V (or the embedded ABI) on
21444 the PowerPC, do not emit the period, since those systems do not use
21445 TOCs and the like. */
21446 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
21448 /* For macho, check to see if we need a stub. */
21451 const char *name
= XSTR (x
, 0);
21453 if (darwin_emit_branch_islands
21454 && MACHOPIC_INDIRECT
21455 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
21456 name
= machopic_indirection_name (x
, /*stub_p=*/true);
21458 assemble_name (file
, name
);
21460 else if (!DOT_SYMBOLS
)
21461 assemble_name (file
, XSTR (x
, 0));
21463 rs6000_output_function_entry (file
, XSTR (x
, 0));
21467 /* Like 'L', for last word of TImode/PTImode. */
21469 fputs (reg_names
[REGNO (x
) + 3], file
);
21470 else if (MEM_P (x
))
21472 machine_mode mode
= GET_MODE (x
);
21473 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
21474 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21475 output_address (mode
, plus_constant (Pmode
,
21476 XEXP (XEXP (x
, 0), 0), 12));
21477 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21478 output_address (mode
, plus_constant (Pmode
,
21479 XEXP (XEXP (x
, 0), 0), 12));
21481 output_address (mode
, XEXP (adjust_address_nv (x
, SImode
, 12), 0));
21482 if (small_data_operand (x
, GET_MODE (x
)))
21483 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21484 reg_names
[SMALL_DATA_REG
]);
21488 /* Print AltiVec memory operand. */
21493 gcc_assert (MEM_P (x
));
21497 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x
))
21498 && GET_CODE (tmp
) == AND
21499 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
21500 && INTVAL (XEXP (tmp
, 1)) == -16)
21501 tmp
= XEXP (tmp
, 0);
21502 else if (VECTOR_MEM_VSX_P (GET_MODE (x
))
21503 && GET_CODE (tmp
) == PRE_MODIFY
)
21504 tmp
= XEXP (tmp
, 1);
21506 fprintf (file
, "0,%s", reg_names
[REGNO (tmp
)]);
21509 if (GET_CODE (tmp
) != PLUS
21510 || !REG_P (XEXP (tmp
, 0))
21511 || !REG_P (XEXP (tmp
, 1)))
21513 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21517 if (REGNO (XEXP (tmp
, 0)) == 0)
21518 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 1)) ],
21519 reg_names
[ REGNO (XEXP (tmp
, 0)) ]);
21521 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 0)) ],
21522 reg_names
[ REGNO (XEXP (tmp
, 1)) ]);
21529 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
21530 else if (MEM_P (x
))
21532 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21533 know the width from the mode. */
21534 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
)
21535 fprintf (file
, "%d(%s)", GET_MODE_SIZE (GET_MODE (x
)),
21536 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
21537 else if (GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
21538 fprintf (file
, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x
)),
21539 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
21540 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
21541 output_address (GET_MODE (x
), XEXP (XEXP (x
, 0), 1));
21543 output_address (GET_MODE (x
), XEXP (x
, 0));
21547 if (toc_relative_expr_p (x
, false, &tocrel_base_oac
, &tocrel_offset_oac
))
21548 /* This hack along with a corresponding hack in
21549 rs6000_output_addr_const_extra arranges to output addends
21550 where the assembler expects to find them. eg.
21551 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21552 without this hack would be output as "x@toc+4". We
21554 output_addr_const (file
, CONST_CAST_RTX (tocrel_base_oac
));
21556 output_addr_const (file
, x
);
21561 if (const char *name
= get_some_local_dynamic_name ())
21562 assemble_name (file
, name
);
21564 output_operand_lossage ("'%%&' used without any "
21565 "local dynamic TLS references");
21569 output_operand_lossage ("invalid %%xn code");
21573 /* Print the address of an operand. */
21576 print_operand_address (FILE *file
, rtx x
)
21579 fprintf (file
, "0(%s)", reg_names
[ REGNO (x
) ]);
21580 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
21581 || GET_CODE (x
) == LABEL_REF
)
21583 output_addr_const (file
, x
);
21584 if (small_data_operand (x
, GET_MODE (x
)))
21585 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
21586 reg_names
[SMALL_DATA_REG
]);
21588 gcc_assert (!TARGET_TOC
);
21590 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
21591 && REG_P (XEXP (x
, 1)))
21593 if (REGNO (XEXP (x
, 0)) == 0)
21594 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 1)) ],
21595 reg_names
[ REGNO (XEXP (x
, 0)) ]);
21597 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 0)) ],
21598 reg_names
[ REGNO (XEXP (x
, 1)) ]);
21600 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
21601 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
21602 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"(%s)",
21603 INTVAL (XEXP (x
, 1)), reg_names
[ REGNO (XEXP (x
, 0)) ]);
21605 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
21606 && CONSTANT_P (XEXP (x
, 1)))
21608 fprintf (file
, "lo16(");
21609 output_addr_const (file
, XEXP (x
, 1));
21610 fprintf (file
, ")(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
21614 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
21615 && CONSTANT_P (XEXP (x
, 1)))
21617 output_addr_const (file
, XEXP (x
, 1));
21618 fprintf (file
, "@l(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
21621 else if (toc_relative_expr_p (x
, false, &tocrel_base_oac
, &tocrel_offset_oac
))
      /* This hack along with a corresponding hack in
         rs6000_output_addr_const_extra arranges to output addends
         where the assembler expects to find them.  eg.
         (lo_sum (reg 9)
         .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
         without this hack would be output as "x@toc+8@l(9)".  We
         want "x+8@toc@l(9)".  */
21630 output_addr_const (file
, CONST_CAST_RTX (tocrel_base_oac
));
21631 if (GET_CODE (x
) == LO_SUM
)
21632 fprintf (file
, "@l(%s)", reg_names
[REGNO (XEXP (x
, 0))]);
21634 fprintf (file
, "(%s)", reg_names
[REGNO (XVECEXP (tocrel_base_oac
, 0, 1))]);
21637 gcc_unreachable ();
21640 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21643 rs6000_output_addr_const_extra (FILE *file
, rtx x
)
21645 if (GET_CODE (x
) == UNSPEC
)
21646 switch (XINT (x
, 1))
21648 case UNSPEC_TOCREL
:
21649 gcc_checking_assert (GET_CODE (XVECEXP (x
, 0, 0)) == SYMBOL_REF
21650 && REG_P (XVECEXP (x
, 0, 1))
21651 && REGNO (XVECEXP (x
, 0, 1)) == TOC_REGISTER
);
21652 output_addr_const (file
, XVECEXP (x
, 0, 0));
21653 if (x
== tocrel_base_oac
&& tocrel_offset_oac
!= const0_rtx
)
21655 if (INTVAL (tocrel_offset_oac
) >= 0)
21656 fprintf (file
, "+");
21657 output_addr_const (file
, CONST_CAST_RTX (tocrel_offset_oac
));
21659 if (!TARGET_AIX
|| (TARGET_ELF
&& TARGET_MINIMAL_TOC
))
21662 assemble_name (file
, toc_label_name
);
21665 else if (TARGET_ELF
)
21666 fputs ("@toc", file
);
21670 case UNSPEC_MACHOPIC_OFFSET
:
21671 output_addr_const (file
, XVECEXP (x
, 0, 0));
21673 machopic_output_function_base_name (file
);
21680 /* Target hook for assembling integer objects. The PowerPC version has
21681 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21682 is defined. It also needs to handle DI-mode objects on 64-bit
21686 rs6000_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
21688 #ifdef RELOCATABLE_NEEDS_FIXUP
21689 /* Special handling for SI values. */
21690 if (RELOCATABLE_NEEDS_FIXUP
&& size
== 4 && aligned_p
)
21692 static int recurse
= 0;
21694 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21695 the .fixup section. Since the TOC section is already relocated, we
21696 don't need to mark it here. We used to skip the text section, but it
21697 should never be valid for relocated addresses to be placed in the text
21699 if (DEFAULT_ABI
== ABI_V4
21700 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
21701 && in_section
!= toc_section
21703 && !CONST_SCALAR_INT_P (x
)
21709 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCP", fixuplabelno
);
21711 ASM_OUTPUT_LABEL (asm_out_file
, buf
);
21712 fprintf (asm_out_file
, "\t.long\t(");
21713 output_addr_const (asm_out_file
, x
);
21714 fprintf (asm_out_file
, ")@fixup\n");
21715 fprintf (asm_out_file
, "\t.section\t\".fixup\",\"aw\"\n");
21716 ASM_OUTPUT_ALIGN (asm_out_file
, 2);
21717 fprintf (asm_out_file
, "\t.long\t");
21718 assemble_name (asm_out_file
, buf
);
21719 fprintf (asm_out_file
, "\n\t.previous\n");
21723 /* Remove initial .'s to turn a -mcall-aixdesc function
21724 address into the address of the descriptor, not the function
21726 else if (GET_CODE (x
) == SYMBOL_REF
21727 && XSTR (x
, 0)[0] == '.'
21728 && DEFAULT_ABI
== ABI_AIX
)
21730 const char *name
= XSTR (x
, 0);
21731 while (*name
== '.')
21734 fprintf (asm_out_file
, "\t.long\t%s\n", name
);
21738 #endif /* RELOCATABLE_NEEDS_FIXUP */
21739 return default_assemble_integer (x
, size
, aligned_p
);
21742 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21743 /* Emit an assembler directive to set symbol visibility for DECL to
21744 VISIBILITY_TYPE. */
21747 rs6000_assemble_visibility (tree decl
, int vis
)
21752 /* Functions need to have their entry point symbol visibility set as
21753 well as their descriptor symbol visibility. */
21754 if (DEFAULT_ABI
== ABI_AIX
21756 && TREE_CODE (decl
) == FUNCTION_DECL
)
21758 static const char * const visibility_types
[] = {
21759 NULL
, "protected", "hidden", "internal"
21762 const char *name
, *type
;
21764 name
= ((* targetm
.strip_name_encoding
)
21765 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
))));
21766 type
= visibility_types
[vis
];
21768 fprintf (asm_out_file
, "\t.%s\t%s\n", type
, name
);
21769 fprintf (asm_out_file
, "\t.%s\t.%s\n", type
, name
);
21772 default_assemble_visibility (decl
, vis
);
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
          || code == UNLT || code == UNLE || code == UNGT || code == UNGE
          || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);

  return reverse_condition (code);
}
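/* Illustrative sketch (hypothetical, standalone C; not part of the
   compiler): on IEEE floats the logical negation of an ordered compare
   must also accept NaN operands, which is why the CCFPmode path above
   reverses into the "maybe unordered" codes.  The scalar analogue:  */
#if 0
#include <math.h>
#include <stdbool.h>

/* !(a < b) is "a >= b OR unordered", not plain "a >= b".  */
static bool
not_less_than (double a, double b)
{
  return a >= b || isnan (a) || isnan (b);
}
#endif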
21790 /* Generate a compare for CODE. Return a brand-new rtx that
21791 represents the result of the compare. */
21794 rs6000_generate_compare (rtx cmp
, machine_mode mode
)
21796 machine_mode comp_mode
;
21797 rtx compare_result
;
21798 enum rtx_code code
= GET_CODE (cmp
);
21799 rtx op0
= XEXP (cmp
, 0);
21800 rtx op1
= XEXP (cmp
, 1);
21802 if (!TARGET_FLOAT128_HW
&& FLOAT128_VECTOR_P (mode
))
21803 comp_mode
= CCmode
;
21804 else if (FLOAT_MODE_P (mode
))
21805 comp_mode
= CCFPmode
;
21806 else if (code
== GTU
|| code
== LTU
21807 || code
== GEU
|| code
== LEU
)
21808 comp_mode
= CCUNSmode
;
21809 else if ((code
== EQ
|| code
== NE
)
21810 && unsigned_reg_p (op0
)
21811 && (unsigned_reg_p (op1
)
21812 || (CONST_INT_P (op1
) && INTVAL (op1
) != 0)))
21813 /* These are unsigned values, perhaps there will be a later
21814 ordering compare that can be shared with this one. */
21815 comp_mode
= CCUNSmode
;
21817 comp_mode
= CCmode
;
21819 /* If we have an unsigned compare, make sure we don't have a signed value as
21821 if (comp_mode
== CCUNSmode
&& GET_CODE (op1
) == CONST_INT
21822 && INTVAL (op1
) < 0)
21824 op0
= copy_rtx_if_shared (op0
);
21825 op1
= force_reg (GET_MODE (op0
), op1
);
21826 cmp
= gen_rtx_fmt_ee (code
, GET_MODE (cmp
), op0
, op1
);
  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);
21832 /* IEEE 128-bit support in VSX registers when we do not have hardware
21834 if (!TARGET_FLOAT128_HW
&& FLOAT128_VECTOR_P (mode
))
21836 rtx libfunc
= NULL_RTX
;
21837 bool check_nan
= false;
21844 libfunc
= optab_libfunc (eq_optab
, mode
);
21849 libfunc
= optab_libfunc (ge_optab
, mode
);
21854 libfunc
= optab_libfunc (le_optab
, mode
);
21859 libfunc
= optab_libfunc (unord_optab
, mode
);
21860 code
= (code
== UNORDERED
) ? NE
: EQ
;
21866 libfunc
= optab_libfunc (ge_optab
, mode
);
21867 code
= (code
== UNGE
) ? GE
: GT
;
21873 libfunc
= optab_libfunc (le_optab
, mode
);
21874 code
= (code
== UNLE
) ? LE
: LT
;
21880 libfunc
= optab_libfunc (eq_optab
, mode
);
      code = (code == UNEQ) ? EQ : NE;
21885 gcc_unreachable ();
21888 gcc_assert (libfunc
);
21891 dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
21892 SImode
, 2, op0
, mode
, op1
, mode
);
21894 /* The library signals an exception for signalling NaNs, so we need to
21895 handle isgreater, etc. by first checking isordered. */
21898 rtx ne_rtx
, normal_dest
, unord_dest
;
21899 rtx unord_func
= optab_libfunc (unord_optab
, mode
);
21900 rtx join_label
= gen_label_rtx ();
21901 rtx join_ref
= gen_rtx_LABEL_REF (VOIDmode
, join_label
);
21902 rtx unord_cmp
= gen_reg_rtx (comp_mode
);
21905 /* Test for either value being a NaN. */
21906 gcc_assert (unord_func
);
21907 unord_dest
= emit_library_call_value (unord_func
, NULL_RTX
, LCT_CONST
,
21908 SImode
, 2, op0
, mode
, op1
,
21911 /* Set value (0) if either value is a NaN, and jump to the join
21913 dest
= gen_reg_rtx (SImode
);
21914 emit_move_insn (dest
, const1_rtx
);
21915 emit_insn (gen_rtx_SET (unord_cmp
,
21916 gen_rtx_COMPARE (comp_mode
, unord_dest
,
21919 ne_rtx
= gen_rtx_NE (comp_mode
, unord_cmp
, const0_rtx
);
21920 emit_jump_insn (gen_rtx_SET (pc_rtx
,
21921 gen_rtx_IF_THEN_ELSE (VOIDmode
, ne_rtx
,
21925 /* Do the normal comparison, knowing that the values are not
21927 normal_dest
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
21928 SImode
, 2, op0
, mode
, op1
,
21931 emit_insn (gen_cstoresi4 (dest
,
21932 gen_rtx_fmt_ee (code
, SImode
, normal_dest
,
21934 normal_dest
, const0_rtx
));
21936 /* Join NaN and non-Nan paths. Compare dest against 0. */
21937 emit_label (join_label
);
21941 emit_insn (gen_rtx_SET (compare_result
,
21942 gen_rtx_COMPARE (comp_mode
, dest
, const0_rtx
)));
21947 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21948 CLOBBERs to match cmptf_internal2 pattern. */
21949 if (comp_mode
== CCFPmode
&& TARGET_XL_COMPAT
21950 && FLOAT128_IBM_P (GET_MODE (op0
))
21951 && TARGET_HARD_FLOAT
)
21952 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
21954 gen_rtx_SET (compare_result
,
21955 gen_rtx_COMPARE (comp_mode
, op0
, op1
)),
21956 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21957 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21958 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21959 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21960 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21961 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21962 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21963 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
21964 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (Pmode
)))));
21965 else if (GET_CODE (op1
) == UNSPEC
21966 && XINT (op1
, 1) == UNSPEC_SP_TEST
)
21968 rtx op1b
= XVECEXP (op1
, 0, 0);
21969 comp_mode
= CCEQmode
;
21970 compare_result
= gen_reg_rtx (CCEQmode
);
21972 emit_insn (gen_stack_protect_testdi (compare_result
, op0
, op1b
));
21974 emit_insn (gen_stack_protect_testsi (compare_result
, op0
, op1b
));
21977 emit_insn (gen_rtx_SET (compare_result
,
21978 gen_rtx_COMPARE (comp_mode
, op0
, op1
)));
21981 /* Some kinds of FP comparisons need an OR operation;
21982 under flag_finite_math_only we don't bother. */
21983 if (FLOAT_MODE_P (mode
)
21984 && (!FLOAT128_IEEE_P (mode
) || TARGET_FLOAT128_HW
)
21985 && !flag_finite_math_only
21986 && (code
== LE
|| code
== GE
21987 || code
== UNEQ
|| code
== LTGT
21988 || code
== UNGT
|| code
== UNLT
))
21990 enum rtx_code or1
, or2
;
21991 rtx or1_rtx
, or2_rtx
, compare2_rtx
;
21992 rtx or_result
= gen_reg_rtx (CCEQmode
);
21996 case LE
: or1
= LT
; or2
= EQ
; break;
21997 case GE
: or1
= GT
; or2
= EQ
; break;
21998 case UNEQ
: or1
= UNORDERED
; or2
= EQ
; break;
21999 case LTGT
: or1
= LT
; or2
= GT
; break;
22000 case UNGT
: or1
= UNORDERED
; or2
= GT
; break;
22001 case UNLT
: or1
= UNORDERED
; or2
= LT
; break;
22002 default: gcc_unreachable ();
22004 validate_condition_mode (or1
, comp_mode
);
22005 validate_condition_mode (or2
, comp_mode
);
22006 or1_rtx
= gen_rtx_fmt_ee (or1
, SImode
, compare_result
, const0_rtx
);
22007 or2_rtx
= gen_rtx_fmt_ee (or2
, SImode
, compare_result
, const0_rtx
);
22008 compare2_rtx
= gen_rtx_COMPARE (CCEQmode
,
22009 gen_rtx_IOR (SImode
, or1_rtx
, or2_rtx
),
22011 emit_insn (gen_rtx_SET (or_result
, compare2_rtx
));
22013 compare_result
= or_result
;
22017 validate_condition_mode (code
, GET_MODE (compare_result
));
22019 return gen_rtx_fmt_ee (code
, VOIDmode
, compare_result
, const0_rtx
);
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
                          const_tree type1,
                          const_tree type2)
{
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)
    return NULL;

  if (!TARGET_FLOAT128_CVT)
    {
      if ((mode1 == KFmode && mode2 == IFmode)
          || (mode1 == IFmode && mode2 == KFmode))
        return N_("__float128 and __ibm128 cannot be used in the same "
                  "expression");

      if (TARGET_IEEEQUAD
          && ((mode1 == IFmode && mode2 == TFmode)
              || (mode1 == TFmode && mode2 == IFmode)))
        return N_("__ibm128 and long double cannot be used in the same "
                  "expression");

      if (!TARGET_IEEEQUAD
          && ((mode1 == KFmode && mode2 == TFmode)
              || (mode1 == TFmode && mode2 == KFmode)))
        return N_("__float128 and long double cannot be used in the same "
                  "expression");
    }

  return NULL;
}
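/* Illustrative user-level example (hypothetical; shown only to document
   the diagnostics above).  With the default -mno-float128-convert,
   mixing the two 128-bit formats in one expression is rejected:  */
#if 0
__float128 f128;
__ibm128   i128;

/* f128 + i128;                     rejected: "__float128 and __ibm128
                                    cannot be used in the same expression" */
/* (double) f128 + (double) i128;   accepted: both operands are converted
                                    to double first                        */
#endif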
22070 /* Expand floating point conversion to/from __float128 and __ibm128. */
22073 rs6000_expand_float128_convert (rtx dest
, rtx src
, bool unsigned_p
)
22075 machine_mode dest_mode
= GET_MODE (dest
);
22076 machine_mode src_mode
= GET_MODE (src
);
22077 convert_optab cvt
= unknown_optab
;
22078 bool do_move
= false;
22079 rtx libfunc
= NULL_RTX
;
22081 typedef rtx (*rtx_2func_t
) (rtx
, rtx
);
22082 rtx_2func_t hw_convert
= (rtx_2func_t
)0;
22086 rtx_2func_t from_df
;
22087 rtx_2func_t from_sf
;
22088 rtx_2func_t from_si_sign
;
22089 rtx_2func_t from_si_uns
;
22090 rtx_2func_t from_di_sign
;
22091 rtx_2func_t from_di_uns
;
22094 rtx_2func_t to_si_sign
;
22095 rtx_2func_t to_si_uns
;
22096 rtx_2func_t to_di_sign
;
22097 rtx_2func_t to_di_uns
;
22098 } hw_conversions
[2] = {
    /* conversions to/from KFmode */
22101 gen_extenddfkf2_hw
, /* KFmode <- DFmode. */
22102 gen_extendsfkf2_hw
, /* KFmode <- SFmode. */
22103 gen_float_kfsi2_hw
, /* KFmode <- SImode (signed). */
22104 gen_floatuns_kfsi2_hw
, /* KFmode <- SImode (unsigned). */
22105 gen_float_kfdi2_hw
, /* KFmode <- DImode (signed). */
22106 gen_floatuns_kfdi2_hw
, /* KFmode <- DImode (unsigned). */
22107 gen_trunckfdf2_hw
, /* DFmode <- KFmode. */
22108 gen_trunckfsf2_hw
, /* SFmode <- KFmode. */
22109 gen_fix_kfsi2_hw
, /* SImode <- KFmode (signed). */
22110 gen_fixuns_kfsi2_hw
, /* SImode <- KFmode (unsigned). */
22111 gen_fix_kfdi2_hw
, /* DImode <- KFmode (signed). */
22112 gen_fixuns_kfdi2_hw
, /* DImode <- KFmode (unsigned). */
    /* conversions to/from TFmode */
22117 gen_extenddftf2_hw
, /* TFmode <- DFmode. */
22118 gen_extendsftf2_hw
, /* TFmode <- SFmode. */
22119 gen_float_tfsi2_hw
, /* TFmode <- SImode (signed). */
22120 gen_floatuns_tfsi2_hw
, /* TFmode <- SImode (unsigned). */
22121 gen_float_tfdi2_hw
, /* TFmode <- DImode (signed). */
22122 gen_floatuns_tfdi2_hw
, /* TFmode <- DImode (unsigned). */
22123 gen_trunctfdf2_hw
, /* DFmode <- TFmode. */
22124 gen_trunctfsf2_hw
, /* SFmode <- TFmode. */
22125 gen_fix_tfsi2_hw
, /* SImode <- TFmode (signed). */
22126 gen_fixuns_tfsi2_hw
, /* SImode <- TFmode (unsigned). */
22127 gen_fix_tfdi2_hw
, /* DImode <- TFmode (signed). */
22128 gen_fixuns_tfdi2_hw
, /* DImode <- TFmode (unsigned). */
22132 if (dest_mode
== src_mode
)
22133 gcc_unreachable ();
22135 /* Eliminate memory operations. */
22137 src
= force_reg (src_mode
, src
);
22141 rtx tmp
= gen_reg_rtx (dest_mode
);
22142 rs6000_expand_float128_convert (tmp
, src
, unsigned_p
);
22143 rs6000_emit_move (dest
, tmp
, dest_mode
);
22147 /* Convert to IEEE 128-bit floating point. */
22148 if (FLOAT128_IEEE_P (dest_mode
))
22150 if (dest_mode
== KFmode
)
22152 else if (dest_mode
== TFmode
)
22155 gcc_unreachable ();
22161 hw_convert
= hw_conversions
[kf_or_tf
].from_df
;
22166 hw_convert
= hw_conversions
[kf_or_tf
].from_sf
;
22172 if (FLOAT128_IBM_P (src_mode
))
22181 cvt
= ufloat_optab
;
22182 hw_convert
= hw_conversions
[kf_or_tf
].from_si_uns
;
22186 cvt
= sfloat_optab
;
22187 hw_convert
= hw_conversions
[kf_or_tf
].from_si_sign
;
22194 cvt
= ufloat_optab
;
22195 hw_convert
= hw_conversions
[kf_or_tf
].from_di_uns
;
22199 cvt
= sfloat_optab
;
22200 hw_convert
= hw_conversions
[kf_or_tf
].from_di_sign
;
22205 gcc_unreachable ();
22209 /* Convert from IEEE 128-bit floating point. */
22210 else if (FLOAT128_IEEE_P (src_mode
))
22212 if (src_mode
== KFmode
)
22214 else if (src_mode
== TFmode
)
22217 gcc_unreachable ();
22223 hw_convert
= hw_conversions
[kf_or_tf
].to_df
;
22228 hw_convert
= hw_conversions
[kf_or_tf
].to_sf
;
22234 if (FLOAT128_IBM_P (dest_mode
))
22244 hw_convert
= hw_conversions
[kf_or_tf
].to_si_uns
;
22249 hw_convert
= hw_conversions
[kf_or_tf
].to_si_sign
;
22257 hw_convert
= hw_conversions
[kf_or_tf
].to_di_uns
;
22262 hw_convert
= hw_conversions
[kf_or_tf
].to_di_sign
;
22267 gcc_unreachable ();
22271 /* Both IBM format. */
22272 else if (FLOAT128_IBM_P (dest_mode
) && FLOAT128_IBM_P (src_mode
))
22276 gcc_unreachable ();
22278 /* Handle conversion between TFmode/KFmode. */
22280 emit_move_insn (dest
, gen_lowpart (dest_mode
, src
));
22282 /* Handle conversion if we have hardware support. */
22283 else if (TARGET_FLOAT128_HW
&& hw_convert
)
22284 emit_insn ((hw_convert
) (dest
, src
));
22286 /* Call an external function to do the conversion. */
22287 else if (cvt
!= unknown_optab
)
22289 libfunc
= convert_optab_libfunc (cvt
, dest_mode
, src_mode
);
22290 gcc_assert (libfunc
!= NULL_RTX
);
22292 dest2
= emit_library_call_value (libfunc
, dest
, LCT_CONST
, dest_mode
, 1, src
,
22295 gcc_assert (dest2
!= NULL_RTX
);
22296 if (!rtx_equal_p (dest
, dest2
))
22297 emit_move_insn (dest
, dest2
);
22301 gcc_unreachable ();
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}
/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

static rtx
rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
{
  if (op2 == const0_rtx)
    scratch = op1;
  else
    {
      if (GET_CODE (scratch) == SCRATCH)
        scratch = gen_reg_rtx (mode);

      if (logical_operand (op2, mode))
        emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
      else
        emit_insn (gen_rtx_SET (scratch,
                                gen_rtx_PLUS (mode, op1,
                                              negate_rtx (mode, op2))));
    }

  return scratch;
}
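/* Illustrative sketch (hypothetical, standalone C): the helper above
   reduces "op1 == op2" to a single value tested against zero, via XOR
   when op2 fits a logical immediate and via add-of-negation otherwise.
   Ignoring RTL, the arithmetic identities are:  */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
equal_via_xor (uint32_t a, uint32_t b)
{
  return (a ^ b) == 0;            /* XOR form, for logical immediates  */
}

static bool
equal_via_sub (uint32_t a, uint32_t b)
{
  return (uint32_t) (a - b) == 0; /* add-of-negated-operand form  */
}
#endif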
22337 rs6000_emit_sCOND (machine_mode mode
, rtx operands
[])
22340 machine_mode op_mode
;
22341 enum rtx_code cond_code
;
22342 rtx result
= operands
[0];
22344 condition_rtx
= rs6000_generate_compare (operands
[1], mode
);
22345 cond_code
= GET_CODE (condition_rtx
);
22347 if (cond_code
== NE
22348 || cond_code
== GE
|| cond_code
== LE
22349 || cond_code
== GEU
|| cond_code
== LEU
22350 || cond_code
== ORDERED
|| cond_code
== UNGE
|| cond_code
== UNLE
)
22352 rtx not_result
= gen_reg_rtx (CCEQmode
);
22353 rtx not_op
, rev_cond_rtx
;
22354 machine_mode cc_mode
;
22356 cc_mode
= GET_MODE (XEXP (condition_rtx
, 0));
22358 rev_cond_rtx
= gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode
, cond_code
),
22359 SImode
, XEXP (condition_rtx
, 0), const0_rtx
);
22360 not_op
= gen_rtx_COMPARE (CCEQmode
, rev_cond_rtx
, const0_rtx
);
22361 emit_insn (gen_rtx_SET (not_result
, not_op
));
22362 condition_rtx
= gen_rtx_EQ (VOIDmode
, not_result
, const0_rtx
);
22365 op_mode
= GET_MODE (XEXP (operands
[1], 0));
22366 if (op_mode
== VOIDmode
)
22367 op_mode
= GET_MODE (XEXP (operands
[1], 1));
22369 if (TARGET_POWERPC64
&& (op_mode
== DImode
|| FLOAT_MODE_P (mode
)))
22371 PUT_MODE (condition_rtx
, DImode
);
22372 convert_move (result
, condition_rtx
, 0);
22376 PUT_MODE (condition_rtx
, SImode
);
22377 emit_insn (gen_rtx_SET (result
, condition_rtx
));
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
                                                     loc_ref, pc_rtx)));
}
22395 /* Return the string to output a conditional branch to LABEL, which is
22396 the operand template of the label, or NULL if the branch is really a
22397 conditional return.
22399 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22400 condition code register and its mode specifies what kind of
22401 comparison we made.
22403 REVERSED is nonzero if we should reverse the sense of the comparison.
22405 INSN is the insn. */
22408 output_cbranch (rtx op
, const char *label
, int reversed
, rtx_insn
*insn
)
22410 static char string
[64];
22411 enum rtx_code code
= GET_CODE (op
);
22412 rtx cc_reg
= XEXP (op
, 0);
22413 machine_mode mode
= GET_MODE (cc_reg
);
22414 int cc_regno
= REGNO (cc_reg
) - CR0_REGNO
;
22415 int need_longbranch
= label
!= NULL
&& get_attr_length (insn
) == 8;
22416 int really_reversed
= reversed
^ need_longbranch
;
22422 validate_condition_mode (code
, mode
);
22424 /* Work out which way this really branches. We could use
22425 reverse_condition_maybe_unordered here always but this
22426 makes the resulting assembler clearer. */
22427 if (really_reversed
)
22429 /* Reversal of FP compares takes care -- an ordered compare
22430 becomes an unordered compare and vice versa. */
22431 if (mode
== CCFPmode
)
22432 code
= reverse_condition_maybe_unordered (code
);
22434 code
= reverse_condition (code
);
22439 /* Not all of these are actually distinct opcodes, but
22440 we distinguish them for clarity of the resulting assembler. */
22441 case NE
: case LTGT
:
22442 ccode
= "ne"; break;
22443 case EQ
: case UNEQ
:
22444 ccode
= "eq"; break;
22446 ccode
= "ge"; break;
22447 case GT
: case GTU
: case UNGT
:
22448 ccode
= "gt"; break;
22450 ccode
= "le"; break;
22451 case LT
: case LTU
: case UNLT
:
22452 ccode
= "lt"; break;
22453 case UNORDERED
: ccode
= "un"; break;
22454 case ORDERED
: ccode
= "nu"; break;
22455 case UNGE
: ccode
= "nl"; break;
22456 case UNLE
: ccode
= "ng"; break;
22458 gcc_unreachable ();
22461 /* Maybe we have a guess as to how likely the branch is. */
22463 note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
);
22464 if (note
!= NULL_RTX
)
22466 /* PROB is the difference from 50%. */
22467 int prob
= profile_probability::from_reg_br_prob_note (XINT (note
, 0))
22468 .to_reg_br_prob_base () - REG_BR_PROB_BASE
/ 2;
22470 /* Only hint for highly probable/improbable branches on newer cpus when
22471 we have real profile data, as static prediction overrides processor
22472 dynamic prediction. For older cpus we may as well always hint, but
22473 assume not taken for branches that are very close to 50% as a
22474 mispredicted taken branch is more expensive than a
22475 mispredicted not-taken branch. */
22476 if (rs6000_always_hint
22477 || (abs (prob
) > REG_BR_PROB_BASE
/ 100 * 48
22478 && (profile_status_for_fn (cfun
) != PROFILE_GUESSED
)
22479 && br_prob_note_reliable_p (note
)))
22481 if (abs (prob
) > REG_BR_PROB_BASE
/ 20
22482 && ((prob
> 0) ^ need_longbranch
))
22490 s
+= sprintf (s
, "b%slr%s ", ccode
, pred
);
22492 s
+= sprintf (s
, "b%s%s ", ccode
, pred
);
22494 /* We need to escape any '%' characters in the reg_names string.
22495 Assume they'd only be the first character.... */
22496 if (reg_names
[cc_regno
+ CR0_REGNO
][0] == '%')
22498 s
+= sprintf (s
, "%s", reg_names
[cc_regno
+ CR0_REGNO
]);
22502 /* If the branch distance was too far, we may have to use an
22503 unconditional branch to go the distance. */
22504 if (need_longbranch
)
22505 s
+= sprintf (s
, ",$+8\n\tb %s", label
);
22507 s
+= sprintf (s
, ",%s", label
);
22513 /* Return insn for VSX or Altivec comparisons. */
22516 rs6000_emit_vector_compare_inner (enum rtx_code code
, rtx op0
, rtx op1
)
22519 machine_mode mode
= GET_MODE (op0
);
22527 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
22538 mask
= gen_reg_rtx (mode
);
22539 emit_insn (gen_rtx_SET (mask
, gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
22546 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22547 DMODE is expected destination mode. This is a recursive function. */
22550 rs6000_emit_vector_compare (enum rtx_code rcode
,
22552 machine_mode dmode
)
22555 bool swap_operands
= false;
22556 bool try_again
= false;
22558 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode
));
22559 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
22561 /* See if the comparison works as is. */
22562 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22570 swap_operands
= true;
22575 swap_operands
= true;
22583 /* Invert condition and try again.
22584 e.g., A != B becomes ~(A==B). */
22586 enum rtx_code rev_code
;
22587 enum insn_code nor_code
;
22590 rev_code
= reverse_condition_maybe_unordered (rcode
);
22591 if (rev_code
== UNKNOWN
)
22594 nor_code
= optab_handler (one_cmpl_optab
, dmode
);
22595 if (nor_code
== CODE_FOR_nothing
)
22598 mask2
= rs6000_emit_vector_compare (rev_code
, op0
, op1
, dmode
);
22602 mask
= gen_reg_rtx (dmode
);
22603 emit_insn (GEN_FCN (nor_code
) (mask
, mask2
));
22611 /* Try GT/GTU/LT/LTU OR EQ */
22614 enum insn_code ior_code
;
22615 enum rtx_code new_code
;
22636 gcc_unreachable ();
22639 ior_code
= optab_handler (ior_optab
, dmode
);
22640 if (ior_code
== CODE_FOR_nothing
)
22643 c_rtx
= rs6000_emit_vector_compare (new_code
, op0
, op1
, dmode
);
22647 eq_rtx
= rs6000_emit_vector_compare (EQ
, op0
, op1
, dmode
);
22651 mask
= gen_reg_rtx (dmode
);
22652 emit_insn (GEN_FCN (ior_code
) (mask
, c_rtx
, eq_rtx
));
22663 std::swap (op0
, op1
);
22665 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
22670 /* You only get two chances. */
22674 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22675 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22676 operands for the relation operation COND. */
22679 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
22680 rtx cond
, rtx cc_op0
, rtx cc_op1
)
22682 machine_mode dest_mode
= GET_MODE (dest
);
22683 machine_mode mask_mode
= GET_MODE (cc_op0
);
22684 enum rtx_code rcode
= GET_CODE (cond
);
22685 machine_mode cc_mode
= CCmode
;
22688 bool invert_move
= false;
22690 if (VECTOR_UNIT_NONE_P (dest_mode
))
22693 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
22694 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
22698 /* Swap operands if we can, and fall back to doing the operation as
22699 specified, and doing a NOR to invert the test. */
22705 /* Invert condition and try again.
22706 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22707 invert_move
= true;
22708 rcode
= reverse_condition_maybe_unordered (rcode
);
22709 if (rcode
== UNKNOWN
)
22715 if (GET_MODE_CLASS (mask_mode
) == MODE_VECTOR_INT
)
22717 /* Invert condition to avoid compound test. */
22718 invert_move
= true;
22719 rcode
= reverse_condition (rcode
);
22727 /* Mark unsigned tests with CCUNSmode. */
22728 cc_mode
= CCUNSmode
;
22730 /* Invert condition to avoid compound test if necessary. */
22731 if (rcode
== GEU
|| rcode
== LEU
)
22733 invert_move
= true;
22734 rcode
= reverse_condition (rcode
);
22742 /* Get the vector mask for the given relational operations. */
22743 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
22749 std::swap (op_true
, op_false
);
22751 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22752 if (GET_MODE_CLASS (dest_mode
) == MODE_VECTOR_INT
22753 && (GET_CODE (op_true
) == CONST_VECTOR
22754 || GET_CODE (op_false
) == CONST_VECTOR
))
22756 rtx constant_0
= CONST0_RTX (dest_mode
);
22757 rtx constant_m1
= CONSTM1_RTX (dest_mode
);
22759 if (op_true
== constant_m1
&& op_false
== constant_0
)
22761 emit_move_insn (dest
, mask
);
22765 else if (op_true
== constant_0
&& op_false
== constant_m1
)
22767 emit_insn (gen_rtx_SET (dest
, gen_rtx_NOT (dest_mode
, mask
)));
22771 /* If we can't use the vector comparison directly, perhaps we can use
22772 the mask for the true or false fields, instead of loading up a
22774 if (op_true
== constant_m1
)
22777 if (op_false
== constant_0
)
22781 if (!REG_P (op_true
) && !SUBREG_P (op_true
))
22782 op_true
= force_reg (dest_mode
, op_true
);
22784 if (!REG_P (op_false
) && !SUBREG_P (op_false
))
22785 op_false
= force_reg (dest_mode
, op_false
);
22787 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
22788 CONST0_RTX (dest_mode
));
22789 emit_insn (gen_rtx_SET (dest
,
22790 gen_rtx_IF_THEN_ELSE (dest_mode
,
/* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
   for SF/DF scalars.  Move TRUE_COND to DEST if OP of the operands of the
   last comparison is nonzero/true, FALSE_COND if it is zero/false.  Return 0
   if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  bool max_p = false;

  if (result_mode != compare_mode)
    return 0;

  if (code == GE || code == GT)
    max_p = true;
  else if (code == LE || code == LT)
    max_p = false;
  else
    return 0;

  if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
    ;
  else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
    max_p = !max_p;
  else
    return 0;

  rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
  return 1;
}
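/* Illustrative sketch (hypothetical, standalone C): the subcase above
   matches selects whose arms are exactly the compared values, e.g. the
   pattern below, a candidate for a single xsmaxcdp on power9.  */
#if 0
static double
max_like (double a, double b)
{
  return (a >= b) ? a : b;   /* arms are the operands of the compare  */
}
#endif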
22835 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22836 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22837 operands of the last comparison is nonzero/true, FALSE_COND if it is
22838 zero/false. Return 0 if the hardware has no such operation. */
22841 rs6000_emit_p9_fp_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22843 enum rtx_code code
= GET_CODE (op
);
22844 rtx op0
= XEXP (op
, 0);
22845 rtx op1
= XEXP (op
, 1);
22846 machine_mode result_mode
= GET_MODE (dest
);
22851 if (!can_create_pseudo_p ())
22864 code
= swap_condition (code
);
22865 std::swap (op0
, op1
);
22872 /* Generate: [(parallel [(set (dest)
22873 (if_then_else (op (cmp1) (cmp2))
22876 (clobber (scratch))])]. */
22878 compare_rtx
= gen_rtx_fmt_ee (code
, CCFPmode
, op0
, op1
);
22879 cmove_rtx
= gen_rtx_SET (dest
,
22880 gen_rtx_IF_THEN_ELSE (result_mode
,
22885 clobber_rtx
= gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (V2DImode
));
22886 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
22887 gen_rtvec (2, cmove_rtx
, clobber_rtx
)));
22892 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22893 operands of the last comparison is nonzero/true, FALSE_COND if it
22894 is zero/false. Return 0 if the hardware has no such operation. */
22897 rs6000_emit_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
22899 enum rtx_code code
= GET_CODE (op
);
22900 rtx op0
= XEXP (op
, 0);
22901 rtx op1
= XEXP (op
, 1);
22902 machine_mode compare_mode
= GET_MODE (op0
);
22903 machine_mode result_mode
= GET_MODE (dest
);
22905 bool is_against_zero
;
22907 /* These modes should always match. */
22908 if (GET_MODE (op1
) != compare_mode
22909 /* In the isel case however, we can use a compare immediate, so
22910 op1 may be a small constant. */
22911 && (!TARGET_ISEL
|| !short_cint_operand (op1
, VOIDmode
)))
22913 if (GET_MODE (true_cond
) != result_mode
)
22915 if (GET_MODE (false_cond
) != result_mode
)
22918 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22919 if (TARGET_P9_MINMAX
22920 && (compare_mode
== SFmode
|| compare_mode
== DFmode
)
22921 && (result_mode
== SFmode
|| result_mode
== DFmode
))
22923 if (rs6000_emit_p9_fp_minmax (dest
, op
, true_cond
, false_cond
))
22926 if (rs6000_emit_p9_fp_cmove (dest
, op
, true_cond
, false_cond
))
22930 /* Don't allow using floating point comparisons for integer results for
22932 if (FLOAT_MODE_P (compare_mode
) && !FLOAT_MODE_P (result_mode
))
22935 /* First, work out if the hardware can do this at all, or
22936 if it's too slow.... */
22937 if (!FLOAT_MODE_P (compare_mode
))
22940 return rs6000_emit_int_cmove (dest
, op
, true_cond
, false_cond
);
22944 is_against_zero
= op1
== CONST0_RTX (compare_mode
);
22946 /* A floating-point subtract might overflow, underflow, or produce
22947 an inexact result, thus changing the floating-point flags, so it
22948 can't be generated if we care about that. It's safe if one side
22949 of the construct is zero, since then no subtract will be
22951 if (SCALAR_FLOAT_MODE_P (compare_mode
)
22952 && flag_trapping_math
&& ! is_against_zero
)
22955 /* Eliminate half of the comparisons by switching operands, this
22956 makes the remaining code simpler. */
22957 if (code
== UNLT
|| code
== UNGT
|| code
== UNORDERED
|| code
== NE
22958 || code
== LTGT
|| code
== LT
|| code
== UNLE
)
22960 code
= reverse_condition_maybe_unordered (code
);
22962 true_cond
= false_cond
;
22966 /* UNEQ and LTGT take four instructions for a comparison with zero,
22967 it'll probably be faster to use a branch here too. */
22968 if (code
== UNEQ
&& HONOR_NANS (compare_mode
))
22971 /* We're going to try to implement comparisons by performing
22972 a subtract, then comparing against zero. Unfortunately,
22973 Inf - Inf is NaN which is not zero, and so if we don't
22974 know that the operand is finite and the comparison
22975 would treat EQ different to UNORDERED, we can't do it. */
22976 if (HONOR_INFINITIES (compare_mode
)
22977 && code
!= GT
&& code
!= UNGE
22978 && (GET_CODE (op1
) != CONST_DOUBLE
22979 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1
)))
22980 /* Constructs of the form (a OP b ? a : b) are safe. */
22981 && ((! rtx_equal_p (op0
, false_cond
) && ! rtx_equal_p (op1
, false_cond
))
22982 || (! rtx_equal_p (op0
, true_cond
)
22983 && ! rtx_equal_p (op1
, true_cond
))))
22986 /* At this point we know we can use fsel. */
22988 /* Reduce the comparison to a comparison against zero. */
22989 if (! is_against_zero
)
22991 temp
= gen_reg_rtx (compare_mode
);
22992 emit_insn (gen_rtx_SET (temp
, gen_rtx_MINUS (compare_mode
, op0
, op1
)));
22994 op1
= CONST0_RTX (compare_mode
);
22997 /* If we don't care about NaNs we can reduce some of the comparisons
22998 down to faster ones. */
22999 if (! HONOR_NANS (compare_mode
))
23005 true_cond
= false_cond
;
23018 /* Now, reduce everything down to a GE. */
23025 temp
= gen_reg_rtx (compare_mode
);
23026 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23031 temp
= gen_reg_rtx (compare_mode
);
23032 emit_insn (gen_rtx_SET (temp
, gen_rtx_ABS (compare_mode
, op0
)));
23037 temp
= gen_reg_rtx (compare_mode
);
23038 emit_insn (gen_rtx_SET (temp
,
23039 gen_rtx_NEG (compare_mode
,
23040 gen_rtx_ABS (compare_mode
, op0
))));
23045 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23046 temp
= gen_reg_rtx (result_mode
);
23047 emit_insn (gen_rtx_SET (temp
,
23048 gen_rtx_IF_THEN_ELSE (result_mode
,
23049 gen_rtx_GE (VOIDmode
,
23051 true_cond
, false_cond
)));
23052 false_cond
= true_cond
;
23055 temp
= gen_reg_rtx (compare_mode
);
23056 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23061 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23062 temp
= gen_reg_rtx (result_mode
);
23063 emit_insn (gen_rtx_SET (temp
,
23064 gen_rtx_IF_THEN_ELSE (result_mode
,
23065 gen_rtx_GE (VOIDmode
,
23067 true_cond
, false_cond
)));
23068 true_cond
= false_cond
;
23071 temp
= gen_reg_rtx (compare_mode
);
23072 emit_insn (gen_rtx_SET (temp
, gen_rtx_NEG (compare_mode
, op0
)));
23077 gcc_unreachable ();
23080 emit_insn (gen_rtx_SET (dest
,
23081 gen_rtx_IF_THEN_ELSE (result_mode
,
23082 gen_rtx_GE (VOIDmode
,
23084 true_cond
, false_cond
)));
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
               ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
               : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      std::swap (false_cond, true_cond);
      PUT_CODE (condition_rtx, reverse_condition (cond_code));
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
                  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
          || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
                                    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
                                    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* Split a signbit operation on 64-bit machines with direct move.  Also allow
   for the value to come from memory or if it is already loaded into a GPR.  */

void
rs6000_split_signbit (rtx dest, rtx src)
{
  machine_mode d_mode = GET_MODE (dest);
  machine_mode s_mode = GET_MODE (src);
  rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
  rtx shift_reg = dest_di;

  gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);

  if (MEM_P (src))
    {
      rtx mem = (WORDS_BIG_ENDIAN
                 ? adjust_address (src, DImode, 0)
                 : adjust_address (src, DImode, 8));

      emit_insn (gen_rtx_SET (dest_di, mem));
    }
  else
    {
      unsigned int r = reg_or_subregno (src);

      if (INT_REGNO_P (r))
        shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
      else
        {
          /* Generate the special mfvsrd instruction to get it in a GPR.  */
          gcc_assert (VSX_REGNO_P (r));
          if (s_mode == KFmode)
            emit_insn (gen_signbitkf2_dm2 (dest_di, src));
          else
            emit_insn (gen_signbittf2_dm2 (dest_di, src));
        }
    }

  emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */
23247 emit_load_locked (machine_mode mode
, rtx reg
, rtx mem
)
23249 rtx (*fn
) (rtx
, rtx
) = NULL
;
23254 fn
= gen_load_lockedqi
;
23257 fn
= gen_load_lockedhi
;
23260 if (GET_MODE (mem
) == QImode
)
23261 fn
= gen_load_lockedqi_si
;
23262 else if (GET_MODE (mem
) == HImode
)
23263 fn
= gen_load_lockedhi_si
;
23265 fn
= gen_load_lockedsi
;
23268 fn
= gen_load_lockeddi
;
23271 fn
= gen_load_lockedti
;
23274 gcc_unreachable ();
23276 emit_insn (fn (reg
, mem
));
23279 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23280 instruction in MODE. */
23283 emit_store_conditional (machine_mode mode
, rtx res
, rtx mem
, rtx val
)
23285 rtx (*fn
) (rtx
, rtx
, rtx
) = NULL
;
23290 fn
= gen_store_conditionalqi
;
23293 fn
= gen_store_conditionalhi
;
23296 fn
= gen_store_conditionalsi
;
23299 fn
= gen_store_conditionaldi
;
23302 fn
= gen_store_conditionalti
;
23305 gcc_unreachable ();
23308 /* Emit sync before stwcx. to address PPC405 Erratum. */
23309 if (PPC405_ERRATUM77
)
23310 emit_insn (gen_hwsync ());
23312 emit_insn (fn (res
, mem
, val
));
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}
static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
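/* Illustrative sketch (hypothetical): the two barrier helpers above
   bracket the load-locked/store-conditional loop, roughly

       hwsync                  # pre-barrier for seq_cst (lwsync for
                               # release/acq_rel, nothing for relaxed)
     1: lwarx   r9,0,r3
        ...                    # the atomic operation itself
        stwcx.  r9,0,r3
        bne-    1b
       isync                   # post-barrier for acquire and stronger

   A user-level way to reach this path is simply:  */
#if 0
#include <stdatomic.h>

static int
fetch_add_seq_cst (atomic_int *p)
{
  return atomic_fetch_add_explicit (p, 1, memory_order_seq_cst);
}
#endif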
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
                                 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
                              shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
                                          gen_rtx_NOT (SImode, mask),
                                          oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
                              wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
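/* Illustrative sketch (hypothetical, standalone C): the three helpers above
   implement a byte-sized atomic update on top of a word-sized reservation.
   Ignoring atomicity, the bit manipulation is:  */
#if 0
#include <stdint.h>

static uint8_t
subword_rmw (uint32_t *word_base, unsigned byte_index, uint8_t newval,
             int big_endian)
{
  unsigned shift = byte_index * 8;
  if (big_endian)
    shift ^= 0x18;                        /* byte 0 is the high byte on BE  */
  uint32_t mask = 0xffu << shift;         /* mask for insertion  */
  uint32_t old_word = *word_base;         /* stands in for lwarx  */
  uint32_t new_word = (old_word & ~mask) | ((uint32_t) newval << shift);
  *word_base = new_word;                  /* stands in for stwcx.  */
  return (uint8_t) (old_word >> shift);   /* extract the old subword  */
}
#endif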
23443 /* Expand an atomic compare and swap operation. */
23446 rs6000_expand_atomic_compare_and_swap (rtx operands
[])
23448 rtx boolval
, retval
, mem
, oldval
, newval
, cond
;
23449 rtx label1
, label2
, x
, mask
, shift
;
23450 machine_mode mode
, orig_mode
;
23451 enum memmodel mod_s
, mod_f
;
23454 boolval
= operands
[0];
23455 retval
= operands
[1];
23457 oldval
= operands
[3];
23458 newval
= operands
[4];
23459 is_weak
= (INTVAL (operands
[5]) != 0);
23460 mod_s
= memmodel_base (INTVAL (operands
[6]));
23461 mod_f
= memmodel_base (INTVAL (operands
[7]));
23462 orig_mode
= mode
= GET_MODE (mem
);
23464 mask
= shift
= NULL_RTX
;
23465 if (mode
== QImode
|| mode
== HImode
)
23467 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23468 lwarx and shift/mask operations. With power8, we need to do the
23469 comparison in SImode, but the store is still done in QI/HImode. */
23470 oldval
= convert_modes (SImode
, mode
, oldval
, 1);
23472 if (!TARGET_SYNC_HI_QI
)
23474 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23476 /* Shift and mask OLDVAL into position with the word. */
23477 oldval
= expand_simple_binop (SImode
, ASHIFT
, oldval
, shift
,
23478 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23480 /* Shift and mask NEWVAL into position within the word. */
23481 newval
= convert_modes (SImode
, mode
, newval
, 1);
23482 newval
= expand_simple_binop (SImode
, ASHIFT
, newval
, shift
,
23483 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23486 /* Prepare to adjust the return value. */
23487 retval
= gen_reg_rtx (SImode
);
23490 else if (reg_overlap_mentioned_p (retval
, oldval
))
23491 oldval
= copy_to_reg (oldval
);
23493 if (mode
!= TImode
&& !reg_or_short_operand (oldval
, mode
))
23494 oldval
= copy_to_mode_reg (mode
, oldval
);
23496 if (reg_overlap_mentioned_p (retval
, newval
))
23497 newval
= copy_to_reg (newval
);
23499 mem
= rs6000_pre_atomic_barrier (mem
, mod_s
);
23504 label1
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23505 emit_label (XEXP (label1
, 0));
23507 label2
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23509 emit_load_locked (mode
, retval
, mem
);
23513 x
= expand_simple_binop (SImode
, AND
, retval
, mask
,
23514 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23516 cond
= gen_reg_rtx (CCmode
);
23517 /* If we have TImode, synthesize a comparison. */
23518 if (mode
!= TImode
)
23519 x
= gen_rtx_COMPARE (CCmode
, x
, oldval
);
23522 rtx xor1_result
= gen_reg_rtx (DImode
);
23523 rtx xor2_result
= gen_reg_rtx (DImode
);
23524 rtx or_result
= gen_reg_rtx (DImode
);
23525 rtx new_word0
= simplify_gen_subreg (DImode
, x
, TImode
, 0);
23526 rtx new_word1
= simplify_gen_subreg (DImode
, x
, TImode
, 8);
23527 rtx old_word0
= simplify_gen_subreg (DImode
, oldval
, TImode
, 0);
23528 rtx old_word1
= simplify_gen_subreg (DImode
, oldval
, TImode
, 8);
23530 emit_insn (gen_xordi3 (xor1_result
, new_word0
, old_word0
));
23531 emit_insn (gen_xordi3 (xor2_result
, new_word1
, old_word1
));
23532 emit_insn (gen_iordi3 (or_result
, xor1_result
, xor2_result
));
23533 x
= gen_rtx_COMPARE (CCmode
, or_result
, const0_rtx
);
23536 emit_insn (gen_rtx_SET (cond
, x
));
23538 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23539 emit_unlikely_jump (x
, label2
);
23543 x
= rs6000_mask_atomic_subword (retval
, newval
, mask
);
23545 emit_store_conditional (orig_mode
, cond
, mem
, x
);
23549 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23550 emit_unlikely_jump (x
, label1
);
23553 if (!is_mm_relaxed (mod_f
))
23554 emit_label (XEXP (label2
, 0));
23556 rs6000_post_atomic_barrier (mod_s
);
23558 if (is_mm_relaxed (mod_f
))
23559 emit_label (XEXP (label2
, 0));
23562 rs6000_finish_atomic_subword (operands
[1], retval
, shift
);
23563 else if (mode
!= GET_MODE (operands
[1]))
23564 convert_move (operands
[1], retval
, 1);
23566 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23567 x
= gen_rtx_EQ (SImode
, cond
, const0_rtx
);
23568 emit_insn (gen_rtx_SET (boolval
, x
));
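/* Illustrative sketch (hypothetical): the expansion above is roughly the
   retry loop that backs the C11/GCC compare-and-swap builtins on this
   target, e.g.  */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static bool
cas_int (atomic_int *p, int expected, int desired)
{
  /* Strong form: the larx/stcx. loop retries on spurious reservation
     loss, so only a genuine value mismatch reports failure.  */
  return atomic_compare_exchange_strong_explicit (p, &expected, desired,
                                                  memory_order_seq_cst,
                                                  memory_order_relaxed);
}
#endif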
23571 /* Expand an atomic exchange operation. */
23574 rs6000_expand_atomic_exchange (rtx operands
[])
23576 rtx retval
, mem
, val
, cond
;
23578 enum memmodel model
;
23579 rtx label
, x
, mask
, shift
;
23581 retval
= operands
[0];
23584 model
= memmodel_base (INTVAL (operands
[3]));
23585 mode
= GET_MODE (mem
);
23587 mask
= shift
= NULL_RTX
;
23588 if (!TARGET_SYNC_HI_QI
&& (mode
== QImode
|| mode
== HImode
))
23590 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23592 /* Shift and mask VAL into position with the word. */
23593 val
= convert_modes (SImode
, mode
, val
, 1);
23594 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23595 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23597 /* Prepare to adjust the return value. */
23598 retval
= gen_reg_rtx (SImode
);
23602 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23604 label
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
23605 emit_label (XEXP (label
, 0));
23607 emit_load_locked (mode
, retval
, mem
);
23611 x
= rs6000_mask_atomic_subword (retval
, val
, mask
);
23613 cond
= gen_reg_rtx (CCmode
);
23614 emit_store_conditional (mode
, cond
, mem
, x
);
23616 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23617 emit_unlikely_jump (x
, label
);
23619 rs6000_post_atomic_barrier (model
);
23622 rs6000_finish_atomic_subword (operands
[0], retval
, shift
);
23625 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23626 to perform. MEM is the memory on which to operate. VAL is the second
23627 operand of the binary operator. BEFORE and AFTER are optional locations to
23628 return the value of MEM either before of after the operation. MODEL_RTX
23629 is a CONST_INT containing the memory model to use. */
23632 rs6000_expand_atomic_op (enum rtx_code code
, rtx mem
, rtx val
,
23633 rtx orig_before
, rtx orig_after
, rtx model_rtx
)
23635 enum memmodel model
= memmodel_base (INTVAL (model_rtx
));
23636 machine_mode mode
= GET_MODE (mem
);
23637 machine_mode store_mode
= mode
;
23638 rtx label
, x
, cond
, mask
, shift
;
23639 rtx before
= orig_before
, after
= orig_after
;
23641 mask
= shift
= NULL_RTX
;
23642 /* On power8, we want to use SImode for the operation. On previous systems,
23643 use the operation in a subword and shift/mask to get the proper byte or
23645 if (mode
== QImode
|| mode
== HImode
)
23647 if (TARGET_SYNC_HI_QI
)
23649 val
= convert_modes (SImode
, mode
, val
, 1);
23651 /* Prepare to adjust the return value. */
23652 before
= gen_reg_rtx (SImode
);
23654 after
= gen_reg_rtx (SImode
);
23659 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
23661 /* Shift and mask VAL into position with the word. */
23662 val
= convert_modes (SImode
, mode
, val
, 1);
23663 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
23664 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23670 /* We've already zero-extended VAL. That is sufficient to
23671 make certain that it does not affect other bits. */
23676 /* If we make certain that all of the other bits in VAL are
23677 set, that will be sufficient to not affect other bits. */
23678 x
= gen_rtx_NOT (SImode
, mask
);
23679 x
= gen_rtx_IOR (SImode
, x
, val
);
23680 emit_insn (gen_rtx_SET (val
, x
));
23687 /* These will all affect bits outside the field and need
23688 adjustment via MASK within the loop. */
23692 gcc_unreachable ();
23695 /* Prepare to adjust the return value. */
23696 before
= gen_reg_rtx (SImode
);
23698 after
= gen_reg_rtx (SImode
);
23699 store_mode
= mode
= SImode
;
23703 mem
= rs6000_pre_atomic_barrier (mem
, model
);
23705 label
= gen_label_rtx ();
23706 emit_label (label
);
23707 label
= gen_rtx_LABEL_REF (VOIDmode
, label
);
23709 if (before
== NULL_RTX
)
23710 before
= gen_reg_rtx (mode
);
23712 emit_load_locked (mode
, before
, mem
);
23716 x
= expand_simple_binop (mode
, AND
, before
, val
,
23717 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23718 after
= expand_simple_unop (mode
, NOT
, x
, after
, 1);
23722 after
= expand_simple_binop (mode
, code
, before
, val
,
23723 after
, 1, OPTAB_LIB_WIDEN
);
23729 x
= expand_simple_binop (SImode
, AND
, after
, mask
,
23730 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
23731 x
= rs6000_mask_atomic_subword (before
, x
, mask
);
23733 else if (store_mode
!= mode
)
23734 x
= convert_modes (store_mode
, mode
, x
, 1);
23736 cond
= gen_reg_rtx (CCmode
);
23737 emit_store_conditional (store_mode
, cond
, mem
, x
);
23739 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
23740 emit_unlikely_jump (x
, label
);
23742 rs6000_post_atomic_barrier (model
);
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
         then do the calculations in a SImode register.  */
23749 rs6000_finish_atomic_subword (orig_before
, before
, shift
);
23751 rs6000_finish_atomic_subword (orig_after
, after
, shift
);
23753 else if (store_mode
!= mode
)
      /* QImode/HImode on machines with lbarx/lharx where we do the native
         operation and then do the calculations in a SImode register.  */
23758 convert_move (orig_before
, before
, 1);
23760 convert_move (orig_after
, after
, 1);
23762 else if (orig_after
&& after
!= orig_after
)
23763 emit_move_insn (orig_after
, after
);
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */
23773 rs6000_split_multireg_move (rtx dst
, rtx src
)
23775 /* The register number of the first register being moved. */
23777 /* The mode that is to be moved. */
23779 /* The mode that the move is being done in, and its size. */
23780 machine_mode reg_mode
;
23782 /* The number of registers that will be moved. */
23785 reg
= REG_P (dst
) ? REGNO (dst
) : REGNO (src
);
23786 mode
= GET_MODE (dst
);
23787 nregs
= hard_regno_nregs
[reg
][mode
];
23788 if (FP_REGNO_P (reg
))
23789 reg_mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
:
23790 ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
) ? DFmode
: SFmode
);
23791 else if (ALTIVEC_REGNO_P (reg
))
23792 reg_mode
= V16QImode
;
23794 reg_mode
= word_mode
;
23795 reg_mode_size
= GET_MODE_SIZE (reg_mode
);
23797 gcc_assert (reg_mode_size
* nregs
== GET_MODE_SIZE (mode
));
  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
23807 if (FP_REGNO_P (reg
) && DECIMAL_FLOAT_MODE_P (mode
) && !BYTES_BIG_ENDIAN
)
23812 for (i
= 0; i
< nregs
; i
++)
23814 if (REG_P (src
) && FP_REGNO_P (REGNO (src
)))
23815 p_src
= gen_rtx_REG (reg_mode
, REGNO (src
) + nregs
- 1 - i
);
23817 p_src
= simplify_gen_subreg (reg_mode
, src
, mode
,
23818 i
* reg_mode_size
);
23820 if (REG_P (dst
) && FP_REGNO_P (REGNO (dst
)))
23821 p_dst
= gen_rtx_REG (reg_mode
, REGNO (dst
) + nregs
- 1 - i
);
23823 p_dst
= simplify_gen_subreg (reg_mode
, dst
, mode
,
23824 i
* reg_mode_size
);
23826 emit_insn (gen_rtx_SET (p_dst
, p_src
));
23832 if (REG_P (src
) && REG_P (dst
) && (REGNO (src
) < REGNO (dst
)))
      /* Move register range backwards, if we might have destructive
	 overlap.  */
23837 for (i
= nregs
- 1; i
>= 0; i
--)
23838 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode
, dst
, mode
,
23839 i
* reg_mode_size
),
23840 simplify_gen_subreg (reg_mode
, src
, mode
,
23841 i
* reg_mode_size
)));
23847 bool used_update
= false;
23848 rtx restore_basereg
= NULL_RTX
;
23850 if (MEM_P (src
) && INT_REGNO_P (reg
))
23854 if (GET_CODE (XEXP (src
, 0)) == PRE_INC
23855 || GET_CODE (XEXP (src
, 0)) == PRE_DEC
)
23858 breg
= XEXP (XEXP (src
, 0), 0);
23859 delta_rtx
= (GET_CODE (XEXP (src
, 0)) == PRE_INC
23860 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src
)))
23861 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src
))));
23862 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23863 src
= replace_equiv_address (src
, breg
);
23865 else if (! rs6000_offsettable_memref_p (src
, reg_mode
))
23867 if (GET_CODE (XEXP (src
, 0)) == PRE_MODIFY
)
23869 rtx basereg
= XEXP (XEXP (src
, 0), 0);
23872 rtx ndst
= simplify_gen_subreg (reg_mode
, dst
, mode
, 0);
23873 emit_insn (gen_rtx_SET (ndst
,
23874 gen_rtx_MEM (reg_mode
,
23876 used_update
= true;
23879 emit_insn (gen_rtx_SET (basereg
,
23880 XEXP (XEXP (src
, 0), 1)));
23881 src
= replace_equiv_address (src
, basereg
);
23885 rtx basereg
= gen_rtx_REG (Pmode
, reg
);
23886 emit_insn (gen_rtx_SET (basereg
, XEXP (src
, 0)));
23887 src
= replace_equiv_address (src
, basereg
);
23891 breg
= XEXP (src
, 0);
23892 if (GET_CODE (breg
) == PLUS
|| GET_CODE (breg
) == LO_SUM
)
23893 breg
= XEXP (breg
, 0);
23895 /* If the base register we are using to address memory is
23896 also a destination reg, then change that register last. */
23898 && REGNO (breg
) >= REGNO (dst
)
23899 && REGNO (breg
) < REGNO (dst
) + nregs
)
23900 j
= REGNO (breg
) - REGNO (dst
);
23902 else if (MEM_P (dst
) && INT_REGNO_P (reg
))
23906 if (GET_CODE (XEXP (dst
, 0)) == PRE_INC
23907 || GET_CODE (XEXP (dst
, 0)) == PRE_DEC
)
23910 breg
= XEXP (XEXP (dst
, 0), 0);
23911 delta_rtx
= (GET_CODE (XEXP (dst
, 0)) == PRE_INC
23912 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst
)))
23913 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst
))));
23915 /* We have to update the breg before doing the store.
23916 Use store with update, if available. */
23920 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
23921 emit_insn (TARGET_32BIT
23922 ? (TARGET_POWERPC64
23923 ? gen_movdi_si_update (breg
, breg
, delta_rtx
, nsrc
)
23924 : gen_movsi_update (breg
, breg
, delta_rtx
, nsrc
))
23925 : gen_movdi_di_update (breg
, breg
, delta_rtx
, nsrc
));
23926 used_update
= true;
23929 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
23930 dst
= replace_equiv_address (dst
, breg
);
23932 else if (!rs6000_offsettable_memref_p (dst
, reg_mode
)
23933 && GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
23935 if (GET_CODE (XEXP (dst
, 0)) == PRE_MODIFY
)
23937 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
23940 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
23941 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode
,
23944 used_update
= true;
23947 emit_insn (gen_rtx_SET (basereg
,
23948 XEXP (XEXP (dst
, 0), 1)));
23949 dst
= replace_equiv_address (dst
, basereg
);
23953 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
23954 rtx offsetreg
= XEXP (XEXP (dst
, 0), 1);
23955 gcc_assert (GET_CODE (XEXP (dst
, 0)) == PLUS
23957 && REG_P (offsetreg
)
23958 && REGNO (basereg
) != REGNO (offsetreg
));
23959 if (REGNO (basereg
) == 0)
23961 rtx tmp
= offsetreg
;
23962 offsetreg
= basereg
;
23965 emit_insn (gen_add3_insn (basereg
, basereg
, offsetreg
));
23966 restore_basereg
= gen_sub3_insn (basereg
, basereg
, offsetreg
);
23967 dst
= replace_equiv_address (dst
, basereg
);
23970 else if (GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
23971 gcc_assert (rs6000_offsettable_memref_p (dst
, reg_mode
));
23974 for (i
= 0; i
< nregs
; i
++)
23976 /* Calculate index to next subword. */
23981 /* If compiler already emitted move of first word by
23982 store with update, no need to do anything. */
23983 if (j
== 0 && used_update
)
23986 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode
, dst
, mode
,
23987 j
* reg_mode_size
),
23988 simplify_gen_subreg (reg_mode
, src
, mode
,
23989 j
* reg_mode_size
)));
23991 if (restore_basereg
!= NULL_RTX
)
23992 emit_insn (restore_basereg
);
23997 /* This page contains routines that are used to determine what the
23998 function prologue and epilogue code will do and write them out. */
24000 /* Determine whether the REG is really used. */
24003 save_reg_p (int reg
)
24005 /* We need to mark the PIC offset register live for the same conditions
24006 as it is set up, or otherwise it won't be saved before we clobber it. */
24008 if (reg
== RS6000_PIC_OFFSET_TABLE_REGNUM
&& !TARGET_SINGLE_PIC_BASE
)
24010 /* When calling eh_return, we must return true for all the cases
24011 where conditional_register_usage marks the PIC offset reg
24013 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
24014 && (crtl
->calls_eh_return
24015 || df_regs_ever_live_p (reg
)
24016 || !constant_pool_empty_p ()))
24019 if ((DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
)
24024 return !call_used_regs
[reg
] && df_regs_ever_live_p (reg
);
24027 /* Return the first fixed-point register that is required to be
24028 saved. 32 if none. */
24031 first_reg_to_save (void)
24035 /* Find lowest numbered live register. */
24036 for (first_reg
= 13; first_reg
<= 31; first_reg
++)
24037 if (save_reg_p (first_reg
))
24042 && crtl
->uses_pic_offset_table
24043 && first_reg
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
24044 return RS6000_PIC_OFFSET_TABLE_REGNUM
;
24050 /* Similar, for FP regs. */
24053 first_fp_reg_to_save (void)
24057 /* Find lowest numbered live register. */
24058 for (first_reg
= 14 + 32; first_reg
<= 63; first_reg
++)
24059 if (save_reg_p (first_reg
))
24065 /* Similar, for AltiVec regs. */
24068 first_altivec_reg_to_save (void)
24072 /* Stack frame remains as is unless we are in AltiVec ABI. */
24073 if (! TARGET_ALTIVEC_ABI
)
24074 return LAST_ALTIVEC_REGNO
+ 1;
24076 /* On Darwin, the unwind routines are compiled without
24077 TARGET_ALTIVEC, and use save_world to save/restore the
24078 altivec registers when necessary. */
24079 if (DEFAULT_ABI
== ABI_DARWIN
&& crtl
->calls_eh_return
24080 && ! TARGET_ALTIVEC
)
24081 return FIRST_ALTIVEC_REGNO
+ 20;
24083 /* Find lowest numbered live register. */
24084 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
<= LAST_ALTIVEC_REGNO
; ++i
)
24085 if (save_reg_p (i
))
24091 /* Return a 32-bit mask of the AltiVec registers we need to set in
24092 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24093 the 32-bit word is 0. */
24095 static unsigned int
24096 compute_vrsave_mask (void)
24098 unsigned int i
, mask
= 0;
24100 /* On Darwin, the unwind routines are compiled without
24101 TARGET_ALTIVEC, and use save_world to save/restore the
24102 call-saved altivec registers when necessary. */
24103 if (DEFAULT_ABI
== ABI_DARWIN
&& crtl
->calls_eh_return
24104 && ! TARGET_ALTIVEC
)
24107 /* First, find out if we use _any_ altivec registers. */
24108 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
24109 if (df_regs_ever_live_p (i
))
24110 mask
|= ALTIVEC_REG_BIT (i
);
24115 /* Next, remove the argument registers from the set. These must
24116 be in the VRSAVE mask set by the caller, so we don't need to add
24117 them in again. More importantly, the mask we compute here is
24118 used to generate CLOBBERs in the set_vrsave insn, and we do not
24119 wish the argument registers to die. */
24120 for (i
= ALTIVEC_ARG_MIN_REG
; i
< (unsigned) crtl
->args
.info
.vregno
; i
++)
24121 mask
&= ~ALTIVEC_REG_BIT (i
);
24123 /* Similarly, remove the return value from the set. */
24126 diddle_return_value (is_altivec_return_reg
, &yes
);
24128 mask
&= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN
);
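  /* A worked example (added for illustration, assuming the usual rs6000.h
     definition ALTIVEC_REG_BIT (R) == 0x80000000 >> ((R) - FIRST_ALTIVEC_REGNO)):
     if only V20 and V31 are ever live, the first loop above computes
     (0x80000000 >> 20) | (0x80000000 >> 31) == 0x00000800 | 0x00000001
     == 0x00000801; the argument and return-value bits are then cleared
     again by the code just above.  */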
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */
24139 compute_save_world_info (rs6000_stack_t
*info
)
24141 info
->world_save_p
= 1;
24143 = (WORLD_SAVE_P (info
)
24144 && DEFAULT_ABI
== ABI_DARWIN
24145 && !cfun
->has_nonlocal_label
24146 && info
->first_fp_reg_save
== FIRST_SAVED_FP_REGNO
24147 && info
->first_gp_reg_save
== FIRST_SAVED_GP_REGNO
24148 && info
->first_altivec_reg_save
== FIRST_SAVED_ALTIVEC_REGNO
24149 && info
->cr_save_p
);
24151 /* This will not work in conjunction with sibcalls. Make sure there
24152 are none. (This check is expensive, but seldom executed.) */
24153 if (WORLD_SAVE_P (info
))
24156 for (insn
= get_last_insn_anywhere (); insn
; insn
= PREV_INSN (insn
))
24157 if (CALL_P (insn
) && SIBLING_CALL_P (insn
))
24159 info
->world_save_p
= 0;
24164 if (WORLD_SAVE_P (info
))
24166 /* Even if we're not touching VRsave, make sure there's room on the
24167 stack for it, if it looks like we're calling SAVE_WORLD, which
24168 will attempt to save it. */
24169 info
->vrsave_size
= 4;
24171 /* If we are going to save the world, we need to save the link register too. */
24172 info
->lr_save_p
= 1;
24174 /* "Save" the VRsave register too if we're saving the world. */
24175 if (info
->vrsave_mask
== 0)
24176 info
->vrsave_mask
= compute_vrsave_mask ();
      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
24181 gcc_assert (info
->first_fp_reg_save
>= FIRST_SAVED_FP_REGNO
24182 && (info
->first_altivec_reg_save
24183 >= FIRST_SAVED_ALTIVEC_REGNO
));
24191 is_altivec_return_reg (rtx reg
, void *xyes
)
24193 bool *yes
= (bool *) xyes
;
24194 if (REGNO (reg
) == ALTIVEC_ARG_RETURN
)
/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */
24206 fixed_reg_p (int reg
)
24208 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24209 backend sets it, overriding anything the user might have given. */
24210 if (reg
== RS6000_PIC_OFFSET_TABLE_REGNUM
24211 && ((DEFAULT_ABI
== ABI_V4
&& flag_pic
)
24212 || (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
)
24213 || (TARGET_TOC
&& TARGET_MINIMAL_TOC
)))
24216 return fixed_regs
[reg
];
/* Determine the strategy for saving/restoring registers.  */
enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};
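/* As an illustration of how these flags combine (an added example, not part
   of the strategy computation itself): a strategy that saves and restores
   every register class with inline code corresponds to
     SAVE_INLINE_GPRS | SAVE_INLINE_FPRS | SAVE_INLINE_VRS
     | REST_INLINE_GPRS | REST_INLINE_FPRS | REST_INLINE_VRS
   == 0x2 | 0x4 | 0x20 | 0x200 | 0x400 | 0x1000 == 0x1626.  */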
24236 rs6000_savres_strategy (rs6000_stack_t
*info
,
24237 bool using_static_chain_p
)
24241 /* Select between in-line and out-of-line save and restore of regs.
24242 First, all the obvious cases where we don't use out-of-line. */
24243 if (crtl
->calls_eh_return
24244 || cfun
->machine
->ra_need_lr
)
24245 strategy
|= (SAVE_INLINE_FPRS
| REST_INLINE_FPRS
24246 | SAVE_INLINE_GPRS
| REST_INLINE_GPRS
24247 | SAVE_INLINE_VRS
| REST_INLINE_VRS
);
24249 if (info
->first_gp_reg_save
== 32)
24250 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24252 if (info
->first_fp_reg_save
== 64
24253 /* The out-of-line FP routines use double-precision stores;
24254 we can't use those routines if we don't have such stores. */
24255 || (TARGET_HARD_FLOAT
&& !TARGET_DOUBLE_FLOAT
))
24256 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24258 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
+ 1)
24259 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24261 /* Define cutoff for using out-of-line functions to save registers. */
24262 if (DEFAULT_ABI
== ABI_V4
|| TARGET_ELF
)
24264 if (!optimize_size
)
24266 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24267 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24268 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24272 /* Prefer out-of-line restore if it will exit. */
24273 if (info
->first_fp_reg_save
> 61)
24274 strategy
|= SAVE_INLINE_FPRS
;
24275 if (info
->first_gp_reg_save
> 29)
24277 if (info
->first_fp_reg_save
== 64)
24278 strategy
|= SAVE_INLINE_GPRS
;
24280 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24282 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
)
24283 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24286 else if (DEFAULT_ABI
== ABI_DARWIN
)
24288 if (info
->first_fp_reg_save
> 60)
24289 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24290 if (info
->first_gp_reg_save
> 29)
24291 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24292 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24296 gcc_checking_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
24297 if ((flag_shrink_wrap_separate
&& optimize_function_for_speed_p (cfun
))
24298 || info
->first_fp_reg_save
> 61)
24299 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
24300 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24301 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
24304 /* Don't bother to try to save things out-of-line if r11 is occupied
24305 by the static chain. It would require too much fiddling and the
24306 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24307 pointer on Darwin, and AIX uses r1 or r12. */
24308 if (using_static_chain_p
24309 && (DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
))
24310 strategy
|= ((DEFAULT_ABI
== ABI_DARWIN
? 0 : SAVE_INLINE_FPRS
)
24312 | SAVE_INLINE_VRS
);
24314 /* Don't ever restore fixed regs. That means we can't use the
24315 out-of-line register restore functions if a fixed reg is in the
24316 range of regs restored. */
24317 if (!(strategy
& REST_INLINE_FPRS
))
24318 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24321 strategy
|= REST_INLINE_FPRS
;
24325 /* We can only use the out-of-line routines to restore fprs if we've
24326 saved all the registers from first_fp_reg_save in the prologue.
24327 Otherwise, we risk loading garbage. Of course, if we have saved
24328 out-of-line then we know we haven't skipped any fprs. */
24329 if ((strategy
& SAVE_INLINE_FPRS
)
24330 && !(strategy
& REST_INLINE_FPRS
))
24331 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
24332 if (!save_reg_p (i
))
24334 strategy
|= REST_INLINE_FPRS
;
24338 /* Similarly, for altivec regs. */
24339 if (!(strategy
& REST_INLINE_VRS
))
24340 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24343 strategy
|= REST_INLINE_VRS
;
24347 if ((strategy
& SAVE_INLINE_VRS
)
24348 && !(strategy
& REST_INLINE_VRS
))
24349 for (int i
= info
->first_altivec_reg_save
; i
< LAST_ALTIVEC_REGNO
+ 1; i
++)
24350 if (!save_reg_p (i
))
24352 strategy
|= REST_INLINE_VRS
;
24356 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24357 saved is an out-of-line save or restore. Set up the value for
24358 the next test (excluding out-of-line gprs). */
24359 bool lr_save_p
= (info
->lr_save_p
24360 || !(strategy
& SAVE_INLINE_FPRS
)
24361 || !(strategy
& SAVE_INLINE_VRS
)
24362 || !(strategy
& REST_INLINE_FPRS
)
24363 || !(strategy
& REST_INLINE_VRS
));
24365 if (TARGET_MULTIPLE
24366 && !TARGET_POWERPC64
24367 && info
->first_gp_reg_save
< 31
24368 && !(flag_shrink_wrap
24369 && flag_shrink_wrap_separate
24370 && optimize_function_for_speed_p (cfun
)))
24373 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24374 if (save_reg_p (i
))
	  /* Don't use store multiple if only one reg needs to be
	     saved.  This can occur for example when the ABI_V4 pic reg
	     (r30) needs to be saved to make calls, but r31 is not
	     used.  */
24382 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24385 /* Prefer store multiple for saves over out-of-line
24386 routines, since the store-multiple instruction will
24387 always be smaller. */
24388 strategy
|= SAVE_INLINE_GPRS
| SAVE_MULTIPLE
;
	  /* The situation is more complicated with load multiple.
	     We'd prefer to use the out-of-line routines for restores,
	     since the "exit" out-of-line routines can handle the
	     restore of LR and the frame teardown.  However, it doesn't
	     make sense to use the out-of-line routine if that is the
	     only reason we'd need to save LR, and we can't use the
	     "exit" out-of-line gpr restore if we have saved some
	     fprs; in those cases it is advantageous to use load
	     multiple when available.  */
24399 if (info
->first_fp_reg_save
!= 64 || !lr_save_p
)
24400 strategy
|= REST_INLINE_GPRS
| REST_MULTIPLE
;
24404 /* Using the "exit" out-of-line routine does not improve code size
24405 if using it would require lr to be saved and if only saving one
24407 else if (!lr_save_p
&& info
->first_gp_reg_save
> 29)
24408 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
24410 /* Don't ever restore fixed regs. */
24411 if ((strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24412 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24413 if (fixed_reg_p (i
))
24415 strategy
|= REST_INLINE_GPRS
;
24416 strategy
&= ~REST_MULTIPLE
;
24420 /* We can only use load multiple or the out-of-line routines to
24421 restore gprs if we've saved all the registers from
24422 first_gp_reg_save. Otherwise, we risk loading garbage.
24423 Of course, if we have saved out-of-line or used stmw then we know
24424 we haven't skipped any gprs. */
24425 if ((strategy
& (SAVE_INLINE_GPRS
| SAVE_MULTIPLE
)) == SAVE_INLINE_GPRS
24426 && (strategy
& (REST_INLINE_GPRS
| REST_MULTIPLE
)) != REST_INLINE_GPRS
)
24427 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
24428 if (!save_reg_p (i
))
24430 strategy
|= REST_INLINE_GPRS
;
24431 strategy
&= ~REST_MULTIPLE
;
24435 if (TARGET_ELF
&& TARGET_64BIT
)
24437 if (!(strategy
& SAVE_INLINE_FPRS
))
24438 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24439 else if (!(strategy
& SAVE_INLINE_GPRS
)
24440 && info
->first_fp_reg_save
== 64)
24441 strategy
|= SAVE_NOINLINE_GPRS_SAVES_LR
;
24443 else if (TARGET_AIX
&& !(strategy
& REST_INLINE_FPRS
))
24444 strategy
|= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
;
24446 if (TARGET_MACHO
&& !(strategy
& SAVE_INLINE_FPRS
))
24447 strategy
|= SAVE_NOINLINE_FPRS_SAVES_LR
;
24452 /* Calculate the stack information for the current function. This is
24453 complicated by having two separate calling sequences, the AIX calling
24454 sequence and the V.4 calling sequence.
24456 AIX (and Darwin/Mac OS X) stack frames look like:
24458 SP----> +---------------------------------------+
24459 | back chain to caller | 0 0
24460 +---------------------------------------+
24461 | saved CR | 4 8 (8-11)
24462 +---------------------------------------+
24464 +---------------------------------------+
24465 | reserved for compilers | 12 24
24466 +---------------------------------------+
24467 | reserved for binders | 16 32
24468 +---------------------------------------+
24469 | saved TOC pointer | 20 40
24470 +---------------------------------------+
24471 | Parameter save area (+padding*) (P) | 24 48
24472 +---------------------------------------+
24473 | Alloca space (A) | 24+P etc.
24474 +---------------------------------------+
24475 | Local variable space (L) | 24+P+A
24476 +---------------------------------------+
24477 | Float/int conversion temporary (X) | 24+P+A+L
24478 +---------------------------------------+
24479 | Save area for AltiVec registers (W) | 24+P+A+L+X
24480 +---------------------------------------+
24481 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24482 +---------------------------------------+
24483 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24484 +---------------------------------------+
24485 | Save area for GP registers (G) | 24+P+A+X+L+X+W+Y+Z
24486 +---------------------------------------+
24487 | Save area for FP registers (F) | 24+P+A+X+L+X+W+Y+Z+G
24488 +---------------------------------------+
24489 old SP->| back chain to caller's caller |
24490 +---------------------------------------+
24492 * If the alloca area is present, the parameter save area is
24493 padded so that the former starts 16-byte aligned.
   The required alignment for AIX configurations is two words (i.e., 8
   bytes).
24498 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24500 SP----> +---------------------------------------+
24501 | Back chain to caller | 0
24502 +---------------------------------------+
24503 | Save area for CR | 8
24504 +---------------------------------------+
24506 +---------------------------------------+
24507 | Saved TOC pointer | 24
24508 +---------------------------------------+
24509 | Parameter save area (+padding*) (P) | 32
24510 +---------------------------------------+
24511 | Alloca space (A) | 32+P
24512 +---------------------------------------+
24513 | Local variable space (L) | 32+P+A
24514 +---------------------------------------+
24515 | Save area for AltiVec registers (W) | 32+P+A+L
24516 +---------------------------------------+
24517 | AltiVec alignment padding (Y) | 32+P+A+L+W
24518 +---------------------------------------+
24519 | Save area for GP registers (G) | 32+P+A+L+W+Y
24520 +---------------------------------------+
24521 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24522 +---------------------------------------+
24523 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24524 +---------------------------------------+
24526 * If the alloca area is present, the parameter save area is
24527 padded so that the former starts 16-byte aligned.
24529 V.4 stack frames look like:
24531 SP----> +---------------------------------------+
24532 | back chain to caller | 0
24533 +---------------------------------------+
24534 | caller's saved LR | 4
24535 +---------------------------------------+
24536 | Parameter save area (+padding*) (P) | 8
24537 +---------------------------------------+
24538 | Alloca space (A) | 8+P
24539 +---------------------------------------+
24540 | Varargs save area (V) | 8+P+A
24541 +---------------------------------------+
24542 | Local variable space (L) | 8+P+A+V
24543 +---------------------------------------+
24544 | Float/int conversion temporary (X) | 8+P+A+V+L
24545 +---------------------------------------+
24546 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24547 +---------------------------------------+
24548 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24549 +---------------------------------------+
24550 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24551 +---------------------------------------+
24552 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24553 +---------------------------------------+
24554 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24555 +---------------------------------------+
24556 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24557 +---------------------------------------+
24558 old SP->| back chain to caller's caller |
24559 +---------------------------------------+
24561 * If the alloca area is present and the required alignment is
24562 16 bytes, the parameter save area is padded so that the
24563 alloca area starts 16-byte aligned.
   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyway.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */
24578 #ifndef ABI_STACK_BOUNDARY
24579 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24582 static rs6000_stack_t
*
24583 rs6000_stack_info (void)
24585 /* We should never be called for thunks, we are not set up for that. */
24586 gcc_assert (!cfun
->is_thunk
);
24588 rs6000_stack_t
*info
= &stack_info
;
24589 int reg_size
= TARGET_32BIT
? 4 : 8;
24594 HOST_WIDE_INT non_fixed_size
;
24595 bool using_static_chain_p
;
24597 if (reload_completed
&& info
->reload_completed
)
24600 memset (info
, 0, sizeof (*info
));
24601 info
->reload_completed
= reload_completed
;
24603 /* Select which calling sequence. */
24604 info
->abi
= DEFAULT_ABI
;
24606 /* Calculate which registers need to be saved & save area size. */
24607 info
->first_gp_reg_save
= first_reg_to_save ();
24608 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24609 even if it currently looks like we won't. Reload may need it to
24610 get at a constant; if so, it will have already created a constant
24611 pool entry for it. */
24612 if (((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
24613 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
24614 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
24615 && crtl
->uses_const_pool
24616 && info
->first_gp_reg_save
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
24617 first_gp
= RS6000_PIC_OFFSET_TABLE_REGNUM
;
24619 first_gp
= info
->first_gp_reg_save
;
24621 info
->gp_size
= reg_size
* (32 - first_gp
);
24623 info
->first_fp_reg_save
= first_fp_reg_to_save ();
24624 info
->fp_size
= 8 * (64 - info
->first_fp_reg_save
);
24626 info
->first_altivec_reg_save
= first_altivec_reg_to_save ();
24627 info
->altivec_size
= 16 * (LAST_ALTIVEC_REGNO
+ 1
24628 - info
->first_altivec_reg_save
);
24630 /* Does this function call anything? */
24631 info
->calls_p
= (!crtl
->is_leaf
|| cfun
->machine
->ra_needs_full_frame
);
24633 /* Determine if we need to save the condition code registers. */
24634 if (save_reg_p (CR2_REGNO
)
24635 || save_reg_p (CR3_REGNO
)
24636 || save_reg_p (CR4_REGNO
))
24638 info
->cr_save_p
= 1;
24639 if (DEFAULT_ABI
== ABI_V4
)
24640 info
->cr_size
= reg_size
;
24643 /* If the current function calls __builtin_eh_return, then we need
24644 to allocate stack space for registers that will hold data for
24645 the exception handler. */
24646 if (crtl
->calls_eh_return
)
24649 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; ++i
)
24652 ehrd_size
= i
* UNITS_PER_WORD
;
24657 /* In the ELFv2 ABI, we also need to allocate space for separate
24658 CR field save areas if the function calls __builtin_eh_return. */
24659 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
24661 /* This hard-codes that we have three call-saved CR fields. */
24662 ehcr_size
= 3 * reg_size
;
24663 /* We do *not* use the regular CR save mechanism. */
24664 info
->cr_save_p
= 0;
24669 /* Determine various sizes. */
24670 info
->reg_size
= reg_size
;
24671 info
->fixed_size
= RS6000_SAVE_AREA
;
24672 info
->vars_size
= RS6000_ALIGN (get_frame_size (), 8);
24673 if (cfun
->calls_alloca
)
24675 RS6000_ALIGN (crtl
->outgoing_args_size
+ info
->fixed_size
,
24676 STACK_BOUNDARY
/ BITS_PER_UNIT
) - info
->fixed_size
;
24678 info
->parm_size
= RS6000_ALIGN (crtl
->outgoing_args_size
,
24679 TARGET_ALTIVEC
? 16 : 8);
24680 if (FRAME_GROWS_DOWNWARD
)
24682 += RS6000_ALIGN (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
,
24683 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
)
24684 - (info
->fixed_size
+ info
->vars_size
+ info
->parm_size
);
24686 if (TARGET_ALTIVEC_ABI
)
24687 info
->vrsave_mask
= compute_vrsave_mask ();
24689 if (TARGET_ALTIVEC_VRSAVE
&& info
->vrsave_mask
)
24690 info
->vrsave_size
= 4;
24692 compute_save_world_info (info
);
24694 /* Calculate the offsets. */
24695 switch (DEFAULT_ABI
)
24699 gcc_unreachable ();
24704 info
->fp_save_offset
= -info
->fp_size
;
24705 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24707 if (TARGET_ALTIVEC_ABI
)
24709 info
->vrsave_save_offset
= info
->gp_save_offset
- info
->vrsave_size
;
24711 /* Align stack so vector save area is on a quadword boundary.
24712 The padding goes above the vectors. */
24713 if (info
->altivec_size
!= 0)
24714 info
->altivec_padding_size
= info
->vrsave_save_offset
& 0xF;
24716 info
->altivec_save_offset
= info
->vrsave_save_offset
24717 - info
->altivec_padding_size
24718 - info
->altivec_size
;
24719 gcc_assert (info
->altivec_size
== 0
24720 || info
->altivec_save_offset
% 16 == 0);
24722 /* Adjust for AltiVec case. */
24723 info
->ehrd_offset
= info
->altivec_save_offset
- ehrd_size
;
24726 info
->ehrd_offset
= info
->gp_save_offset
- ehrd_size
;
24728 info
->ehcr_offset
= info
->ehrd_offset
- ehcr_size
;
24729 info
->cr_save_offset
= reg_size
; /* first word when 64-bit. */
24730 info
->lr_save_offset
= 2*reg_size
;
24734 info
->fp_save_offset
= -info
->fp_size
;
24735 info
->gp_save_offset
= info
->fp_save_offset
- info
->gp_size
;
24736 info
->cr_save_offset
= info
->gp_save_offset
- info
->cr_size
;
24738 if (TARGET_ALTIVEC_ABI
)
24740 info
->vrsave_save_offset
= info
->cr_save_offset
- info
->vrsave_size
;
24742 /* Align stack so vector save area is on a quadword boundary. */
24743 if (info
->altivec_size
!= 0)
24744 info
->altivec_padding_size
= 16 - (-info
->vrsave_save_offset
% 16);
24746 info
->altivec_save_offset
= info
->vrsave_save_offset
24747 - info
->altivec_padding_size
24748 - info
->altivec_size
;
24750 /* Adjust for AltiVec case. */
24751 info
->ehrd_offset
= info
->altivec_save_offset
;
24754 info
->ehrd_offset
= info
->cr_save_offset
;
24756 info
->ehrd_offset
-= ehrd_size
;
24757 info
->lr_save_offset
= reg_size
;
24760 save_align
= (TARGET_ALTIVEC_ABI
|| DEFAULT_ABI
== ABI_DARWIN
) ? 16 : 8;
24761 info
->save_size
= RS6000_ALIGN (info
->fp_size
24763 + info
->altivec_size
24764 + info
->altivec_padding_size
24768 + info
->vrsave_size
,
24771 non_fixed_size
= info
->vars_size
+ info
->parm_size
+ info
->save_size
;
24773 info
->total_size
= RS6000_ALIGN (non_fixed_size
+ info
->fixed_size
,
24774 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
);
24776 /* Determine if we need to save the link register. */
24778 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
24780 && !TARGET_PROFILE_KERNEL
)
24781 || (DEFAULT_ABI
== ABI_V4
&& cfun
->calls_alloca
)
24782 #ifdef TARGET_RELOCATABLE
24783 || (DEFAULT_ABI
== ABI_V4
24784 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
24785 && !constant_pool_empty_p ())
24787 || rs6000_ra_ever_killed ())
24788 info
->lr_save_p
= 1;
24790 using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
24791 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
24792 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
24793 info
->savres_strategy
= rs6000_savres_strategy (info
, using_static_chain_p
);
24795 if (!(info
->savres_strategy
& SAVE_INLINE_GPRS
)
24796 || !(info
->savres_strategy
& SAVE_INLINE_FPRS
)
24797 || !(info
->savres_strategy
& SAVE_INLINE_VRS
)
24798 || !(info
->savres_strategy
& REST_INLINE_GPRS
)
24799 || !(info
->savres_strategy
& REST_INLINE_FPRS
)
24800 || !(info
->savres_strategy
& REST_INLINE_VRS
))
24801 info
->lr_save_p
= 1;
24803 if (info
->lr_save_p
)
24804 df_set_regs_ever_live (LR_REGNO
, true);
24806 /* Determine if we need to allocate any stack frame:
24808 For AIX we need to push the stack if a frame pointer is needed
24809 (because the stack might be dynamically adjusted), if we are
24810 debugging, if we make calls, or if the sum of fp_save, gp_save,
24811 and local variables are more than the space needed to save all
24812 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24813 + 18*8 = 288 (GPR13 reserved).
24815 For V.4 we don't have the stack cushion that AIX uses, but assume
24816 that the debugger can handle stackless frames. */
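  /* A quick check of the cushion arithmetic quoted above (added for
     illustration): 18 FPRs at 8 bytes plus 19 GPRs at 4 bytes is
     144 + 76 = 220 bytes for 32-bit, and 18 FPRs plus 18 GPRs at 8 bytes
     each is 144 + 144 = 288 bytes for 64-bit.  */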
24821 else if (DEFAULT_ABI
== ABI_V4
)
24822 info
->push_p
= non_fixed_size
!= 0;
24824 else if (frame_pointer_needed
)
24827 else if (TARGET_XCOFF
&& write_symbols
!= NO_DEBUG
)
24831 info
->push_p
= non_fixed_size
> (TARGET_32BIT
? 220 : 288);
24837 debug_stack_info (rs6000_stack_t
*info
)
24839 const char *abi_string
;
24842 info
= rs6000_stack_info ();
24844 fprintf (stderr
, "\nStack information for function %s:\n",
24845 ((current_function_decl
&& DECL_NAME (current_function_decl
))
24846 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl
))
24851 default: abi_string
= "Unknown"; break;
24852 case ABI_NONE
: abi_string
= "NONE"; break;
24853 case ABI_AIX
: abi_string
= "AIX"; break;
24854 case ABI_ELFv2
: abi_string
= "ELFv2"; break;
24855 case ABI_DARWIN
: abi_string
= "Darwin"; break;
24856 case ABI_V4
: abi_string
= "V.4"; break;
24859 fprintf (stderr
, "\tABI = %5s\n", abi_string
);
24861 if (TARGET_ALTIVEC_ABI
)
24862 fprintf (stderr
, "\tALTIVEC ABI extensions enabled.\n");
24864 if (info
->first_gp_reg_save
!= 32)
24865 fprintf (stderr
, "\tfirst_gp_reg_save = %5d\n", info
->first_gp_reg_save
);
24867 if (info
->first_fp_reg_save
!= 64)
24868 fprintf (stderr
, "\tfirst_fp_reg_save = %5d\n", info
->first_fp_reg_save
);
24870 if (info
->first_altivec_reg_save
<= LAST_ALTIVEC_REGNO
)
24871 fprintf (stderr
, "\tfirst_altivec_reg_save = %5d\n",
24872 info
->first_altivec_reg_save
);
24874 if (info
->lr_save_p
)
24875 fprintf (stderr
, "\tlr_save_p = %5d\n", info
->lr_save_p
);
24877 if (info
->cr_save_p
)
24878 fprintf (stderr
, "\tcr_save_p = %5d\n", info
->cr_save_p
);
24880 if (info
->vrsave_mask
)
24881 fprintf (stderr
, "\tvrsave_mask = 0x%x\n", info
->vrsave_mask
);
24884 fprintf (stderr
, "\tpush_p = %5d\n", info
->push_p
);
24887 fprintf (stderr
, "\tcalls_p = %5d\n", info
->calls_p
);
24890 fprintf (stderr
, "\tgp_save_offset = %5d\n", info
->gp_save_offset
);
24893 fprintf (stderr
, "\tfp_save_offset = %5d\n", info
->fp_save_offset
);
24895 if (info
->altivec_size
)
24896 fprintf (stderr
, "\taltivec_save_offset = %5d\n",
24897 info
->altivec_save_offset
);
24899 if (info
->vrsave_size
)
24900 fprintf (stderr
, "\tvrsave_save_offset = %5d\n",
24901 info
->vrsave_save_offset
);
24903 if (info
->lr_save_p
)
24904 fprintf (stderr
, "\tlr_save_offset = %5d\n", info
->lr_save_offset
);
24906 if (info
->cr_save_p
)
24907 fprintf (stderr
, "\tcr_save_offset = %5d\n", info
->cr_save_offset
);
24909 if (info
->varargs_save_offset
)
24910 fprintf (stderr
, "\tvarargs_save_offset = %5d\n", info
->varargs_save_offset
);
24912 if (info
->total_size
)
24913 fprintf (stderr
, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
24916 if (info
->vars_size
)
24917 fprintf (stderr
, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC
"\n",
24920 if (info
->parm_size
)
24921 fprintf (stderr
, "\tparm_size = %5d\n", info
->parm_size
);
24923 if (info
->fixed_size
)
24924 fprintf (stderr
, "\tfixed_size = %5d\n", info
->fixed_size
);
24927 fprintf (stderr
, "\tgp_size = %5d\n", info
->gp_size
);
24930 fprintf (stderr
, "\tfp_size = %5d\n", info
->fp_size
);
24932 if (info
->altivec_size
)
24933 fprintf (stderr
, "\taltivec_size = %5d\n", info
->altivec_size
);
24935 if (info
->vrsave_size
)
24936 fprintf (stderr
, "\tvrsave_size = %5d\n", info
->vrsave_size
);
24938 if (info
->altivec_padding_size
)
24939 fprintf (stderr
, "\taltivec_padding_size= %5d\n",
24940 info
->altivec_padding_size
);
24943 fprintf (stderr
, "\tcr_size = %5d\n", info
->cr_size
);
24945 if (info
->save_size
)
24946 fprintf (stderr
, "\tsave_size = %5d\n", info
->save_size
);
24948 if (info
->reg_size
!= 4)
24949 fprintf (stderr
, "\treg_size = %5d\n", info
->reg_size
);
24951 fprintf (stderr
, "\tsave-strategy = %04x\n", info
->savres_strategy
);
24953 fprintf (stderr
, "\n");
24957 rs6000_return_addr (int count
, rtx frame
)
24959 /* Currently we don't optimize very well between prolog and body
24960 code and for PIC code the code can be actually quite bad, so
24961 don't try to be too clever here. */
24963 || ((DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
) && flag_pic
))
24965 cfun
->machine
->ra_needs_full_frame
= 1;
24972 plus_constant (Pmode
,
24974 (gen_rtx_MEM (Pmode
,
24975 memory_address (Pmode
, frame
))),
24976 RETURN_ADDRESS_OFFSET
)));
24979 cfun
->machine
->ra_need_lr
= 1;
24980 return get_hard_reg_initial_val (Pmode
, LR_REGNO
);
24983 /* Say whether a function is a candidate for sibcall handling or not. */
24986 rs6000_function_ok_for_sibcall (tree decl
, tree exp
)
24991 fntype
= TREE_TYPE (decl
);
24993 fntype
= TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp
)));
24995 /* We can't do it if the called function has more vector parameters
24996 than the current function; there's nowhere to put the VRsave code. */
24997 if (TARGET_ALTIVEC_ABI
24998 && TARGET_ALTIVEC_VRSAVE
24999 && !(decl
&& decl
== current_function_decl
))
25001 function_args_iterator args_iter
;
      /* Functions with vector parameters are required to have a
	 prototype, so the argument type info must be available
	 here.  */
25008 FOREACH_FUNCTION_ARGS(fntype
, type
, args_iter
)
25009 if (TREE_CODE (type
) == VECTOR_TYPE
25010 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25013 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl
), type
, args_iter
)
25014 if (TREE_CODE (type
) == VECTOR_TYPE
25015 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
25022 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25023 functions, because the callee may have a different TOC pointer to
25024 the caller and there's no way to ensure we restore the TOC when
25025 we return. With the secure-plt SYSV ABI we can't make non-local
25026 calls when -fpic/PIC because the plt call stubs use r30. */
25027 if (DEFAULT_ABI
== ABI_DARWIN
25028 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25030 && !DECL_EXTERNAL (decl
)
25031 && !DECL_WEAK (decl
)
25032 && (*targetm
.binds_local_p
) (decl
))
25033 || (DEFAULT_ABI
== ABI_V4
25034 && (!TARGET_SECURE_PLT
25037 && (*targetm
.binds_local_p
) (decl
)))))
25039 tree attr_list
= TYPE_ATTRIBUTES (fntype
);
25041 if (!lookup_attribute ("longcall", attr_list
)
25042 || lookup_attribute ("shortcall", attr_list
))
25050 rs6000_ra_ever_killed (void)
25056 if (cfun
->is_thunk
)
25059 if (cfun
->machine
->lr_save_state
)
25060 return cfun
->machine
->lr_save_state
- 1;
25062 /* regs_ever_live has LR marked as used if any sibcalls are present,
25063 but this should not force saving and restoring in the
25064 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25065 clobbers LR, so that is inappropriate. */
25067 /* Also, the prologue can generate a store into LR that
25068 doesn't really count, like this:
25071 bcl to set PIC register
25075 When we're called from the epilogue, we need to avoid counting
25076 this as a store. */
25078 push_topmost_sequence ();
25079 top
= get_insns ();
25080 pop_topmost_sequence ();
25081 reg
= gen_rtx_REG (Pmode
, LR_REGNO
);
25083 for (insn
= NEXT_INSN (top
); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
25089 if (!SIBLING_CALL_P (insn
))
25092 else if (find_regno_note (insn
, REG_INC
, LR_REGNO
))
25094 else if (set_of (reg
, insn
) != NULL_RTX
25095 && !prologue_epilogue_contains (insn
))
25102 /* Emit instructions needed to load the TOC register.
25103 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25104 a constant pool; or for SVR4 -fpic. */
25107 rs6000_emit_load_toc_table (int fromprolog
)
25110 dest
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
25112 if (TARGET_ELF
&& TARGET_SECURE_PLT
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
)
25115 rtx lab
, tmp1
, tmp2
, got
;
25117 lab
= gen_label_rtx ();
25118 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (lab
));
25119 lab
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25122 got
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25126 got
= rs6000_got_sym ();
25127 tmp1
= tmp2
= dest
;
25130 tmp1
= gen_reg_rtx (Pmode
);
25131 tmp2
= gen_reg_rtx (Pmode
);
25133 emit_insn (gen_load_toc_v4_PIC_1 (lab
));
25134 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
25135 emit_insn (gen_load_toc_v4_PIC_3b (tmp2
, tmp1
, got
, lab
));
25136 emit_insn (gen_load_toc_v4_PIC_3c (dest
, tmp2
, got
, lab
));
25138 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
25140 emit_insn (gen_load_toc_v4_pic_si ());
25141 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25143 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
25146 rtx temp0
= (fromprolog
25147 ? gen_rtx_REG (Pmode
, 0)
25148 : gen_reg_rtx (Pmode
));
25154 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
25155 symF
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25157 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
25158 symL
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
25160 emit_insn (gen_load_toc_v4_PIC_1 (symF
));
25161 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25162 emit_insn (gen_load_toc_v4_PIC_2 (temp0
, dest
, symL
, symF
));
25168 tocsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25170 lab
= gen_label_rtx ();
25171 emit_insn (gen_load_toc_v4_PIC_1b (tocsym
, lab
));
25172 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
25173 if (TARGET_LINK_STACK
)
25174 emit_insn (gen_addsi3 (dest
, dest
, GEN_INT (4)));
25175 emit_move_insn (temp0
, gen_rtx_MEM (Pmode
, dest
));
25177 emit_insn (gen_addsi3 (dest
, temp0
, dest
));
25179 else if (TARGET_ELF
&& !TARGET_AIX
&& flag_pic
== 0 && TARGET_MINIMAL_TOC
)
25181 /* This is for AIX code running in non-PIC ELF32. */
25182 rtx realsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (toc_label_name
));
25185 emit_insn (gen_elf_high (dest
, realsym
));
25186 emit_insn (gen_elf_low (dest
, dest
, realsym
));
25190 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
25193 emit_insn (gen_load_toc_aix_si (dest
));
25195 emit_insn (gen_load_toc_aix_di (dest
));
25199 /* Emit instructions to restore the link register after determining where
25200 its value has been stored. */
25203 rs6000_emit_eh_reg_restore (rtx source
, rtx scratch
)
25205 rs6000_stack_t
*info
= rs6000_stack_info ();
25208 operands
[0] = source
;
25209 operands
[1] = scratch
;
25211 if (info
->lr_save_p
)
25213 rtx frame_rtx
= stack_pointer_rtx
;
25214 HOST_WIDE_INT sp_offset
= 0;
25217 if (frame_pointer_needed
25218 || cfun
->calls_alloca
25219 || info
->total_size
> 32767)
25221 tmp
= gen_frame_mem (Pmode
, frame_rtx
);
25222 emit_move_insn (operands
[1], tmp
);
25223 frame_rtx
= operands
[1];
25225 else if (info
->push_p
)
25226 sp_offset
= info
->total_size
;
25228 tmp
= plus_constant (Pmode
, frame_rtx
,
25229 info
->lr_save_offset
+ sp_offset
);
25230 tmp
= gen_frame_mem (Pmode
, tmp
);
25231 emit_move_insn (tmp
, operands
[0]);
25234 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNO
), operands
[0]);
25236 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25237 state of lr_save_p so any change from here on would be a bug. In
25238 particular, stop rs6000_ra_ever_killed from considering the SET
25239 of lr we may have added just above. */
25240 cfun
->machine
->lr_save_state
= info
->lr_save_p
+ 1;
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}
25253 /* This returns nonzero if the current function uses the TOC. This is
25254 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25255 is generated by the ABI_V4 load_toc_* patterns. */
25262 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
25265 rtx pat
= PATTERN (insn
);
25268 if (GET_CODE (pat
) == PARALLEL
)
25269 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
25271 rtx sub
= XVECEXP (pat
, 0, i
);
25272 if (GET_CODE (sub
) == USE
)
25274 sub
= XEXP (sub
, 0);
25275 if (GET_CODE (sub
) == UNSPEC
25276 && XINT (sub
, 1) == UNSPEC_TOC
)
25286 create_TOC_reference (rtx symbol
, rtx largetoc_reg
)
25288 rtx tocrel
, tocreg
, hi
;
25290 if (TARGET_DEBUG_ADDR
)
25292 if (GET_CODE (symbol
) == SYMBOL_REF
)
25293 fprintf (stderr
, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25297 fprintf (stderr
, "\ncreate_TOC_reference, code %s:\n",
25298 GET_RTX_NAME (GET_CODE (symbol
)));
25299 debug_rtx (symbol
);
25303 if (!can_create_pseudo_p ())
25304 df_set_regs_ever_live (TOC_REGISTER
, true);
25306 tocreg
= gen_rtx_REG (Pmode
, TOC_REGISTER
);
25307 tocrel
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, symbol
, tocreg
), UNSPEC_TOCREL
);
25308 if (TARGET_CMODEL
== CMODEL_SMALL
|| can_create_pseudo_p ())
25311 hi
= gen_rtx_HIGH (Pmode
, copy_rtx (tocrel
));
25312 if (largetoc_reg
!= NULL
)
25314 emit_move_insn (largetoc_reg
, hi
);
25317 return gen_rtx_LO_SUM (Pmode
, hi
, tocrel
);
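/* Shape of the value built above (an illustrative sketch, not code from this
   file): for the medium and large code models the result is roughly

     (lo_sum:P (high:P (unspec [SYMBOL TOC-reg] UNSPEC_TOCREL))
	       (unspec [SYMBOL TOC-reg] UNSPEC_TOCREL))

   which is later emitted as an addis using the @toc@ha relocation whose
   result feeds an addi or a load using @toc@l.  */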
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */

void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}
25329 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25330 and the change to the stack pointer. */
25333 rs6000_emit_stack_tie (rtx fp
, bool hard_frame_needed
)
25340 regs
[i
++] = gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
25341 if (hard_frame_needed
)
25342 regs
[i
++] = gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
);
25343 if (!(REGNO (fp
) == STACK_POINTER_REGNUM
25344 || (hard_frame_needed
25345 && REGNO (fp
) == HARD_FRAME_POINTER_REGNUM
)))
25348 p
= rtvec_alloc (i
);
25351 rtx mem
= gen_frame_mem (BLKmode
, regs
[i
]);
25352 RTVEC_ELT (p
, i
) = gen_rtx_SET (mem
, const0_rtx
);
25355 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode
, p
)));
25358 /* Emit the correct code for allocating stack space, as insns.
25359 If COPY_REG, make sure a copy of the old frame is left there.
25360 The generated code may use hard register 0 as a temporary. */
25363 rs6000_emit_allocate_stack (HOST_WIDE_INT size
, rtx copy_reg
, int copy_off
)
25366 rtx stack_reg
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
25367 rtx tmp_reg
= gen_rtx_REG (Pmode
, 0);
25368 rtx todec
= gen_int_mode (-size
, Pmode
);
25371 if (INTVAL (todec
) != -size
)
25373 warning (0, "stack frame too large");
25374 emit_insn (gen_trap ());
25378 if (crtl
->limit_stack
)
25380 if (REG_P (stack_limit_rtx
)
25381 && REGNO (stack_limit_rtx
) > 1
25382 && REGNO (stack_limit_rtx
) <= 31)
25385 = gen_add3_insn (tmp_reg
, stack_limit_rtx
, GEN_INT (size
));
25388 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
, const0_rtx
));
25390 else if (GET_CODE (stack_limit_rtx
) == SYMBOL_REF
25392 && DEFAULT_ABI
== ABI_V4
25395 rtx toload
= gen_rtx_CONST (VOIDmode
,
25396 gen_rtx_PLUS (Pmode
,
25400 emit_insn (gen_elf_high (tmp_reg
, toload
));
25401 emit_insn (gen_elf_low (tmp_reg
, tmp_reg
, toload
));
25402 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
,
25406 warning (0, "stack limit expression is not supported");
25412 emit_insn (gen_add3_insn (copy_reg
, stack_reg
, GEN_INT (copy_off
)));
25414 emit_move_insn (copy_reg
, stack_reg
);
25419 /* Need a note here so that try_split doesn't get confused. */
25420 if (get_last_insn () == NULL_RTX
)
25421 emit_note (NOTE_INSN_DELETED
);
25422 insn
= emit_move_insn (tmp_reg
, todec
);
25423 try_split (PATTERN (insn
), insn
, 0);
25427 insn
= emit_insn (TARGET_32BIT
25428 ? gen_movsi_update_stack (stack_reg
, stack_reg
,
25430 : gen_movdi_di_update_stack (stack_reg
, stack_reg
,
25431 todec
, stack_reg
));
25432 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25433 it now and set the alias set/attributes. The above gen_*_update
25434 calls will generate a PARALLEL with the MEM set being the first
25436 par
= PATTERN (insn
);
25437 gcc_assert (GET_CODE (par
) == PARALLEL
);
25438 set
= XVECEXP (par
, 0, 0);
25439 gcc_assert (GET_CODE (set
) == SET
);
25440 mem
= SET_DEST (set
);
25441 gcc_assert (MEM_P (mem
));
25442 MEM_NOTRAP_P (mem
) = 1;
25443 set_mem_alias_set (mem
, get_frame_alias_set ());
25445 RTX_FRAME_RELATED_P (insn
) = 1;
25446 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
25447 gen_rtx_SET (stack_reg
, gen_rtx_PLUS (Pmode
, stack_reg
,
25448 GEN_INT (-size
))));
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
25458 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25459 inclusive. These are offsets from the current stack pointer. */
25462 rs6000_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
25464 /* See if we have a constant small number of probes to generate. If so,
25465 that's the easy case. */
25466 if (first
+ size
<= 32768)
25470 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25471 it exceeds SIZE. If only one probe is needed, this will not
25472 generate any code. Then probe at FIRST + SIZE. */
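      /* Worked example (added for illustration; PROBE_INTERVAL is assumed to
	 be 4096 here): with FIRST == 16384 and SIZE == 10000 the loop below
	 emits probes at offsets FIRST + 4096 and FIRST + 8192 from the stack
	 pointer, and the final call probes at FIRST + SIZE == 26384.  */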
25473 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
25474 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
25477 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
25481 /* Otherwise, do the same as above, but in a loop. Note that we must be
25482 extra careful with variables wrapping around because we might be at
25483 the very top (or the very bottom) of the address space and we have
25484 to be able to handle this case properly; in particular, we use an
25485 equality test for the loop condition. */
25488 HOST_WIDE_INT rounded_size
;
25489 rtx r12
= gen_rtx_REG (Pmode
, 12);
25490 rtx r0
= gen_rtx_REG (Pmode
, 0);
25492 /* Sanity check for the addressing mode we're going to use. */
25493 gcc_assert (first
<= 32768);
25495 /* Step 1: round SIZE to the previous multiple of the interval. */
25497 rounded_size
= ROUND_DOWN (size
, PROBE_INTERVAL
);
25500 /* Step 2: compute initial and final value of the loop counter. */
25502 /* TEST_ADDR = SP + FIRST. */
25503 emit_insn (gen_rtx_SET (r12
, plus_constant (Pmode
, stack_pointer_rtx
,
25506 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25507 if (rounded_size
> 32768)
25509 emit_move_insn (r0
, GEN_INT (-rounded_size
));
25510 emit_insn (gen_rtx_SET (r0
, gen_rtx_PLUS (Pmode
, r12
, r0
)));
25513 emit_insn (gen_rtx_SET (r0
, plus_constant (Pmode
, r12
,
25517 /* Step 3: the loop
25521 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25524 while (TEST_ADDR != LAST_ADDR)
25526 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25527 until it is equal to ROUNDED_SIZE. */
25530 emit_insn (gen_probe_stack_rangedi (r12
, r12
, r0
));
25532 emit_insn (gen_probe_stack_rangesi (r12
, r12
, r0
));
25535 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25536 that SIZE is equal to ROUNDED_SIZE. */
25538 if (size
!= rounded_size
)
25539 emit_stack_probe (plus_constant (Pmode
, r12
, rounded_size
- size
));
25543 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25544 absolute addresses. */
25547 output_probe_stack_range (rtx reg1
, rtx reg2
)
25549 static int labelno
= 0;
25553 ASM_GENERATE_INTERNAL_LABEL (loop_lab
, "LPSRL", labelno
++);
25556 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file
, loop_lab
);
25558 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25560 xops
[1] = GEN_INT (-PROBE_INTERVAL
);
25561 output_asm_insn ("addi %0,%0,%1", xops
);
25563 /* Probe at TEST_ADDR. */
25564 xops
[1] = gen_rtx_REG (Pmode
, 0);
25565 output_asm_insn ("stw %1,0(%0)", xops
);
25567 /* Test if TEST_ADDR == LAST_ADDR. */
25570 output_asm_insn ("cmpd 0,%0,%1", xops
);
25572 output_asm_insn ("cmpw 0,%0,%1", xops
);
25575 fputs ("\tbne 0,", asm_out_file
);
25576 assemble_name_raw (asm_out_file
, loop_lab
);
25577 fputc ('\n', asm_out_file
);
25582 /* This function is called when rs6000_frame_related is processing
25583 SETs within a PARALLEL, and returns whether the REGNO save ought to
25584 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25585 for out-of-line register save functions, store multiple, and the
25586 Darwin world_save. They may contain registers that don't really
25590 interesting_frame_related_regno (unsigned int regno
)
25592 /* Saves apparently of r0 are actually saving LR. It doesn't make
25593 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25594 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25595 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25596 as frame related. */
25599 /* If we see CR2 then we are here on a Darwin world save. Saves of
25600 CR2 signify the whole CR is being saved. This is a long-standing
25601 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25602 that CR needs to be saved. */
25603 if (regno
== CR2_REGNO
)
25605 /* Omit frame info for any user-defined global regs. If frame info
25606 is supplied for them, frame unwinding will restore a user reg.
25607 Also omit frame info for any reg we don't need to save, as that
25608 bloats frame info and can cause problems with shrink wrapping.
25609 Since global regs won't be seen as needing to be saved, both of
25610 these conditions are covered by save_reg_p. */
25611 return save_reg_p (regno
);
25614 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25615 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25616 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25617 deduce these equivalences by itself so it wasn't necessary to hold
25618 its hand so much. Don't be tempted to always supply d2_f_d_e with
25619 the actual cfa register, ie. r31 when we are using a hard frame
25620 pointer. That fails when saving regs off r1, and sched moves the
25621 r31 setup past the reg saves. */
25624 rs6000_frame_related (rtx_insn
*insn
, rtx reg
, HOST_WIDE_INT val
,
25625 rtx reg2
, rtx repl2
)
25629 if (REGNO (reg
) == STACK_POINTER_REGNUM
)
25631 gcc_checking_assert (val
== 0);
25635 repl
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
25638 rtx pat
= PATTERN (insn
);
25639 if (!repl
&& !reg2
)
25641 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25642 if (GET_CODE (pat
) == PARALLEL
)
25643 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
25644 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
25646 rtx set
= XVECEXP (pat
, 0, i
);
25648 if (!REG_P (SET_SRC (set
))
25649 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
25650 RTX_FRAME_RELATED_P (set
) = 1;
25652 RTX_FRAME_RELATED_P (insn
) = 1;
25656 /* We expect that 'pat' is either a SET or a PARALLEL containing
25657 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25658 are important so they all have to be marked RTX_FRAME_RELATED_P.
25659 Call simplify_replace_rtx on the SETs rather than the whole insn
25660 so as to leave the other stuff alone (for example USE of r12). */
25662 set_used_flags (pat
);
25663 if (GET_CODE (pat
) == SET
)
25666 pat
= simplify_replace_rtx (pat
, reg
, repl
);
25668 pat
= simplify_replace_rtx (pat
, reg2
, repl2
);
25670 else if (GET_CODE (pat
) == PARALLEL
)
25672 pat
= shallow_copy_rtx (pat
);
25673 XVEC (pat
, 0) = shallow_copy_rtvec (XVEC (pat
, 0));
25675 for (int i
= 0; i
< XVECLEN (pat
, 0); i
++)
25676 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
)
25678 rtx set
= XVECEXP (pat
, 0, i
);
25681 set
= simplify_replace_rtx (set
, reg
, repl
);
25683 set
= simplify_replace_rtx (set
, reg2
, repl2
);
25684 XVECEXP (pat
, 0, i
) = set
;
25686 if (!REG_P (SET_SRC (set
))
25687 || interesting_frame_related_regno (REGNO (SET_SRC (set
))))
25688 RTX_FRAME_RELATED_P (set
) = 1;
25692 gcc_unreachable ();
25694 RTX_FRAME_RELATED_P (insn
) = 1;
25695 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, copy_rtx_if_shared (pat
));
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs[i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
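
/* For example (illustrative only), gen_frame_store with a GPR, the stack
   pointer and offset 16 on a 64-bit target builds the RTL
	(set (mem:DI (plus:DI (reg 1) (const_int 16))) (reg N))
   while gen_frame_load builds the mirror-image SET that reloads the
   register from the same slot.  */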
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
		       || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}

#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
	goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = ((sel & SAVRES_SAVE)
		  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
		  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	{
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
	  if ((sel & SAVRES_LR))
	    prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
	  else
#endif
	    {
	      prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
	      suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
	    }
	}
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
		 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
		 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
		 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
		 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
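
/* Illustrative examples of the names synthesized above: the 64-bit ELF
   out-of-line GPR save starting at r29 that also handles LR comes out as
   "_savegpr0_29", the variant that leaves LR alone as "_savegpr1_29",
   and a 32-bit SVR4 FPR save starting at f14 as "_savefpr_14".  */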
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save - 32
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
	       : -1);
  rtx sym;
  int select = sel;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
	      && regno <= LAST_SAVRES_REGISTER
	      && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (regno, sel);

      sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select]
	= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx_insn *
rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
			 unsigned updt_regno)
{
  /* If there is nothing to do, don't do anything.  */
  if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
    return NULL;

  rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4)
    return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
					     GEN_INT (frame_off)));

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
				     frame_reg_rtx, GEN_INT (frame_off)));

  return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
}

/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
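
/* In other words: on AIX/ELFv2 the FPR routines and any routine that
   handles LR use r1 as the pointer and the rest use r12, while on
   Darwin the FPR routines use r1 and everything else uses r11.  */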
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx_insn *
rs6000_emit_savres_rtx (rs6000_stack_t *info,
			rtx frame_reg_rtx, int save_area_offset, int lr_offset,
			machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par;
  rtx_insn *insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save
	       : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	     ? 32
	     : (sel & SAVRES_REG) == SAVRES_FPR
	     ? 64
	     : (sel & SAVRES_REG) == SAVRES_VR
	     ? LAST_ALTIVEC_REGNO + 1
	     : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
		   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
		   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
		       frame_reg_rtx, save_area_offset + reg_size * i,
		       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
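
/* As a rough illustration (not lifted from a dump), the PARALLEL built
   above for an out-of-line GPR save that also stores LR looks like:
	(parallel [(clobber (reg LR))
		   (use (symbol_ref "_savegpr0_29"))
		   (use (reg rN))		;; ptr_regno_for_savres (sel)
		   (set (mem ...) (reg 29))
		   ...
		   (set (mem ...) (reg 31))
		   (set (mem ...) (reg 0))])	;; LR copy saved via r0
   For the "exit" restore variants a (return) element is prepended and
   the whole thing is emitted as a jump insn.  */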
/* Emit code to store CR fields that need to be saved into REG.  */

static void
rs6000_emit_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
	{
	  rtvec p = rtvec_alloc (1);
	  rtvec r = rtvec_alloc (2);
	  RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
	  RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
	  RTVEC_ELT (p, 0)
	    = gen_rtx_SET (reg,
			   gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  return;
	}

      /* ??? It might be better to handle count == 2 / 3 cases here
	 as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_movesi_from_cr (reg));
}
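
/* The insns emitted here amount to a single move from the condition
   register: on the ELFv2 single-field path this is roughly
	mfcrf rN,MASK		; MASK selects the one field to copy
   and otherwise the generic
	mfcr rN
   which copies all eight CR fields.  (Asm shown for illustration only;
   the exact mnemonic is decided by the matching insn pattern.)  */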
/* Return whether the split-stack arg pointer (r12) is used.  */

static bool
split_stack_arg_pointer_used_p (void)
{
  /* If the pseudo holding the arg pointer is no longer a pseudo,
     then the arg pointer is used.  */
  if (cfun->machine->split_stack_arg_pointer != NULL_RTX
      && (!REG_P (cfun->machine->split_stack_arg_pointer)
	  || (REGNO (cfun->machine->split_stack_arg_pointer)
	      < FIRST_PSEUDO_REGISTER)))
    return true;

  /* Unfortunately we also need to do some code scanning, since
     r12 may have been substituted for the pseudo.  */
  rtx_insn *insn;
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	/* A call destroys r12.  */
	if (CALL_P (insn))
	  return false;

	df_ref use;
	FOR_EACH_INSN_USE (use, insn)
	  {
	    rtx x = DF_REF_REG (use);
	    if (REG_P (x) && REGNO (x) == 12)
	      return true;
	  }
	df_ref def;
	FOR_EACH_INSN_DEF (def, insn)
	  {
	    rtx x = DF_REF_REG (def);
	    if (REG_P (x) && REGNO (x) == 12)
	      return false;
	  }
      }
  return bitmap_bit_p (DF_LR_OUT (bb), 12);
}
/* Return whether we need to emit an ELFv2 global entry point prologue.  */

static bool
rs6000_global_entry_point_needed_p (void)
{
  /* Only needed for the ELFv2 ABI.  */
  if (DEFAULT_ABI != ABI_ELFv2)
    return false;

  /* With -msingle-pic-base, we assume the whole program shares the same
     TOC, so no global entry point prologues are needed anywhere.  */
  if (TARGET_SINGLE_PIC_BASE)
    return false;

  /* Ensure we have a global entry point for thunks.   ??? We could
     avoid that if the target routine doesn't need a global entry point,
     but we do not know whether this is the case at this point.  */
  if (cfun->is_thunk)
    return true;

  /* For regular functions, rs6000_emit_prologue sets this flag if the
     routine ever uses the TOC pointer.  */
  return cfun->machine->r2_setup_needed;
}
/* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS.  */

static sbitmap
rs6000_get_separate_components (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (WORLD_SAVE_P (info))
    return NULL;

  gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
	      && !(info->savres_strategy & REST_MULTIPLE));

  /* Component 0 is the save/restore of LR (done via GPR0).
     Components 13..31 are the save/restore of GPR13..GPR31.
     Components 46..63 are the save/restore of FPR14..FPR31.  */

  cfun->machine->n_components = 64;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  int reg_size = TARGET_32BIT ? 4 : 8;
  int fp_reg_size = 8;

  /* The GPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS))
    {
      int offset = info->gp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff)
	      && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += reg_size;
	}
    }

  /* Don't mess with the hard frame pointer.  */
  if (frame_pointer_needed)
    bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);

  /* Don't mess with the fixed TOC register.  */
  if ((TARGET_TOC && TARGET_MINIMAL_TOC)
      || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
      || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
    bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);

  /* The FPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS))
    {
      int offset = info->fp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += fp_reg_size;
	}
    }

  /* Optimize LR save and restore if we can.  This is component 0.  Any
     out-of-line register save/restore routines need LR.  */
  if (info->lr_save_p
      && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
      && (info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS)
      && (info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS)
      && (info->savres_strategy & SAVE_INLINE_VRS)
      && (info->savres_strategy & REST_INLINE_VRS))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;
      if (IN_RANGE (offset, -0x8000, 0x7fff))
	bitmap_set_bit (components, 0);
    }

  return components;
}
/* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB.  */

static sbitmap
rs6000_components_for_bb (basic_block bb)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  bitmap in = DF_LIVE_IN (bb);
  bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
  bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  /* A register is used in a bb if it is in the IN, GEN, or KILL sets.  */

  /* GPRs.  */
  for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* FPRs.  */
  for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* The link register.  */
  if (bitmap_bit_p (in, LR_REGNO)
      || bitmap_bit_p (gen, LR_REGNO)
      || bitmap_bit_p (kill, LR_REGNO))
    bitmap_set_bit (components, 0);

  return components;
}
/* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS.  */

static void
rs6000_disqualify_components (sbitmap components, edge e,
			      sbitmap edge_components, bool /*is_prologue*/)
{
  /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
     live where we want to place that code.  */
  if (bitmap_bit_p (edge_components, 0)
      && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
    {
      if (dump_file)
	fprintf (dump_file, "Disqualifying LR because GPR0 is live "
		 "on entry to bb %d\n", e->dest->index);
      bitmap_clear_bit (components, 0);
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS.  */

static void
rs6000_emit_prologue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Prologue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_REGISTER, NULL);

      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
      RTX_FRAME_RELATED_P (insn) = 1;
      rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
      rtx mem = copy_rtx (SET_DEST (single_set (insn)));
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
    }

  /* Prologue for the GPRs.  */
  int offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += reg_size;
    }

  /* Prologue for the FPRs.  */
  offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += fp_reg_size;
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS.  */

static void
rs6000_emit_epilogue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Epilogue for the FPRs.  */
  int offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += fp_reg_size;
    }

  /* Epilogue for the GPRs.  */
  offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += reg_size;
    }

  /* Epilogue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));

      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      insn = emit_move_insn (lr, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_RESTORE, lr);
    }
}
/* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS.  */

static void
rs6000_set_handled_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  for (int i = info->first_gp_reg_save; i < 32; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->gpr_is_wrapped_separately[i] = true;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->fpr_is_wrapped_separately[i - 32] = true;

  if (bitmap_bit_p (components, 0))
    cfun->machine->lr_is_wrapped_separately = true;
}
/* VRSAVE is a bit vector representing which AltiVec registers
   are used.  The OS uses this to determine which vector
   registers to save on a context switch.  We need to save
   VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */

static void
emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
		      HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  /* Get VRSAVE into a GPR.  */
  rtx reg = gen_rtx_REG (SImode, save_regno);
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
  if (TARGET_MACHO)
    emit_insn (gen_get_vrsave_internal (reg));
  else
    emit_insn (gen_rtx_SET (reg, vrsave));

  /* Save VRSAVE.  */
  int offset = info->vrsave_save_offset + frame_off;
  emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

  /* Include the registers in the mask.  */
  emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));

  emit_insn (generate_set_vrsave (reg, info, 0));
}
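
/* Taken together the insns above amount to something like the following
   (illustrative asm only; the exact code depends on the target, the
   chosen GPR and the mask value):
	mfvrsave r12		; copy VRSAVE into the chosen GPR
	stw r12,OFF(r1)		; save the old mask in the frame
	oris r12,r12,MASK@h	; or in the AltiVec regs used here
	ori r12,r12,MASK@l
	mtvrsave r12		; install the updated mask  */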
/* Set up the arg pointer (r12) for -fsplit-stack code.  If __morestack was
   called, it left the arg pointer to the old stack in r29.  Otherwise, the
   arg pointer is the top of the current frame.  */

static void
emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
			   HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  cfun->machine->split_stack_argp_used = true;

  if (sp_adjust)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
      emit_insn_before (set_r12, sp_adjust);
    }
  else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      if (frame_off == 0)
	emit_move_insn (r12, frame_reg_rtx);
      else
	emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
    }

  if (info->push_p)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r29 = gen_rtx_REG (Pmode, 29);
      rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
      rtx not_more = gen_label_rtx ();
      rtx jump;

      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, not_more),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
      JUMP_LABEL (jump) = not_more;
      LABEL_NUSES (not_more) += 1;
      emit_move_insn (r12, r29);
      emit_label (not_more);
    }
}
26584 /* Emit function prologue as insns. */
26587 rs6000_emit_prologue (void)
26589 rs6000_stack_t
*info
= rs6000_stack_info ();
26590 machine_mode reg_mode
= Pmode
;
26591 int reg_size
= TARGET_32BIT
? 4 : 8;
26592 machine_mode fp_reg_mode
= (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
26594 int fp_reg_size
= 8;
26595 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
26596 rtx frame_reg_rtx
= sp_reg_rtx
;
26597 unsigned int cr_save_regno
;
26598 rtx cr_save_rtx
= NULL_RTX
;
26601 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
26602 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
26603 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
26604 int using_split_stack
= (flag_split_stack
26605 && (lookup_attribute ("no_split_stack",
26606 DECL_ATTRIBUTES (cfun
->decl
))
26609 /* Offset to top of frame for frame_reg and sp respectively. */
26610 HOST_WIDE_INT frame_off
= 0;
26611 HOST_WIDE_INT sp_off
= 0;
26612 /* sp_adjust is the stack adjusting instruction, tracked so that the
26613 insn setting up the split-stack arg pointer can be emitted just
26614 prior to it, when r12 is not used here for other purposes. */
26615 rtx_insn
*sp_adjust
= 0;
26618 /* Track and check usage of r0, r11, r12. */
26619 int reg_inuse
= using_static_chain_p
? 1 << 11 : 0;
26620 #define START_USE(R) do \
26622 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26623 reg_inuse |= 1 << (R); \
26625 #define END_USE(R) do \
26627 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26628 reg_inuse &= ~(1 << (R)); \
26630 #define NOT_INUSE(R) do \
26632 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26635 #define START_USE(R) do {} while (0)
26636 #define END_USE(R) do {} while (0)
26637 #define NOT_INUSE(R) do {} while (0)
26640 if (DEFAULT_ABI
== ABI_ELFv2
26641 && !TARGET_SINGLE_PIC_BASE
)
26643 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
26645 /* With -mminimal-toc we may generate an extra use of r2 below. */
26646 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
26647 && !constant_pool_empty_p ())
26648 cfun
->machine
->r2_setup_needed
= true;
26652 if (flag_stack_usage_info
)
26653 current_function_static_stack_size
= info
->total_size
;
26655 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
26657 HOST_WIDE_INT size
= info
->total_size
;
26659 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
26661 if (size
> PROBE_INTERVAL
&& size
> STACK_CHECK_PROTECT
)
26662 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
,
26663 size
- STACK_CHECK_PROTECT
);
26666 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
26669 if (TARGET_FIX_AND_CONTINUE
)
26671 /* gdb on darwin arranges to forward a function from the old
26672 address by modifying the first 5 instructions of the function
26673 to branch to the overriding function. This is necessary to
26674 permit function pointers that point to the old function to
26675 actually forward to the new function. */
26676 emit_insn (gen_nop ());
26677 emit_insn (gen_nop ());
26678 emit_insn (gen_nop ());
26679 emit_insn (gen_nop ());
26680 emit_insn (gen_nop ());
26683 /* Handle world saves specially here. */
26684 if (WORLD_SAVE_P (info
))
26691 /* save_world expects lr in r0. */
26692 reg0
= gen_rtx_REG (Pmode
, 0);
26693 if (info
->lr_save_p
)
26695 insn
= emit_move_insn (reg0
,
26696 gen_rtx_REG (Pmode
, LR_REGNO
));
26697 RTX_FRAME_RELATED_P (insn
) = 1;
26700 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26701 assumptions about the offsets of various bits of the stack
26703 gcc_assert (info
->gp_save_offset
== -220
26704 && info
->fp_save_offset
== -144
26705 && info
->lr_save_offset
== 8
26706 && info
->cr_save_offset
== 4
26709 && (!crtl
->calls_eh_return
26710 || info
->ehrd_offset
== -432)
26711 && info
->vrsave_save_offset
== -224
26712 && info
->altivec_save_offset
== -416);
26714 treg
= gen_rtx_REG (SImode
, 11);
26715 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
26717 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26718 in R11. It also clobbers R12, so beware! */
26720 /* Preserve CR2 for save_world prologues */
26722 sz
+= 32 - info
->first_gp_reg_save
;
26723 sz
+= 64 - info
->first_fp_reg_save
;
26724 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
26725 p
= rtvec_alloc (sz
);
26727 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
26728 gen_rtx_REG (SImode
,
26730 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
26731 gen_rtx_SYMBOL_REF (Pmode
,
26733 /* We do floats first so that the instruction pattern matches
26735 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
26737 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
26739 info
->first_fp_reg_save
+ i
),
26741 info
->fp_save_offset
+ frame_off
+ 8 * i
);
26742 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
26744 = gen_frame_store (gen_rtx_REG (V4SImode
,
26745 info
->first_altivec_reg_save
+ i
),
26747 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
26748 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
26750 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
26752 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
26754 /* CR register traditionally saved as CR2. */
26756 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
26757 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
26758 /* Explain about use of R0. */
26759 if (info
->lr_save_p
)
26761 = gen_frame_store (reg0
,
26762 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
26763 /* Explain what happens to the stack pointer. */
26765 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
26766 RTVEC_ELT (p
, j
++) = gen_rtx_SET (sp_reg_rtx
, newval
);
26769 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26770 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26771 treg
, GEN_INT (-info
->total_size
));
26772 sp_off
= frame_off
= info
->total_size
;
26775 strategy
= info
->savres_strategy
;
26777 /* For V.4, update stack before we do any saving and set back pointer. */
26778 if (! WORLD_SAVE_P (info
)
26780 && (DEFAULT_ABI
== ABI_V4
26781 || crtl
->calls_eh_return
))
26783 bool need_r11
= (!(strategy
& SAVE_INLINE_FPRS
)
26784 || !(strategy
& SAVE_INLINE_GPRS
)
26785 || !(strategy
& SAVE_INLINE_VRS
));
26786 int ptr_regno
= -1;
26787 rtx ptr_reg
= NULL_RTX
;
26790 if (info
->total_size
< 32767)
26791 frame_off
= info
->total_size
;
26794 else if (info
->cr_save_p
26796 || info
->first_fp_reg_save
< 64
26797 || info
->first_gp_reg_save
< 32
26798 || info
->altivec_size
!= 0
26799 || info
->vrsave_size
!= 0
26800 || crtl
->calls_eh_return
)
26804 /* The prologue won't be saving any regs so there is no need
26805 to set up a frame register to access any frame save area.
26806 We also won't be using frame_off anywhere below, but set
26807 the correct value anyway to protect against future
26808 changes to this function. */
26809 frame_off
= info
->total_size
;
26811 if (ptr_regno
!= -1)
26813 /* Set up the frame offset to that needed by the first
26814 out-of-line save function. */
26815 START_USE (ptr_regno
);
26816 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26817 frame_reg_rtx
= ptr_reg
;
26818 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
26819 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
26820 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
26821 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
26822 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
26823 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
26824 frame_off
= -ptr_off
;
26826 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
26828 if (REGNO (frame_reg_rtx
) == 12)
26830 sp_off
= info
->total_size
;
26831 if (frame_reg_rtx
!= sp_reg_rtx
)
26832 rs6000_emit_stack_tie (frame_reg_rtx
, false);
26835 /* If we use the link register, get it into r0. */
26836 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
26837 && !cfun
->machine
->lr_is_wrapped_separately
)
26839 rtx addr
, reg
, mem
;
26841 reg
= gen_rtx_REG (Pmode
, 0);
26843 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
26844 RTX_FRAME_RELATED_P (insn
) = 1;
26846 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
26847 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
26849 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
26850 GEN_INT (info
->lr_save_offset
+ frame_off
));
26851 mem
= gen_rtx_MEM (Pmode
, addr
);
26852 /* This should not be of rs6000_sr_alias_set, because of
26853 __builtin_return_address. */
26855 insn
= emit_move_insn (mem
, reg
);
26856 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26857 NULL_RTX
, NULL_RTX
);
26862 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26863 r12 will be needed by out-of-line gpr restore. */
26864 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
26865 && !(strategy
& (SAVE_INLINE_GPRS
26866 | SAVE_NOINLINE_GPRS_SAVES_LR
))
26868 if (!WORLD_SAVE_P (info
)
26870 && REGNO (frame_reg_rtx
) != cr_save_regno
26871 && !(using_static_chain_p
&& cr_save_regno
== 11)
26872 && !(using_split_stack
&& cr_save_regno
== 12 && sp_adjust
))
26874 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
26875 START_USE (cr_save_regno
);
26876 rs6000_emit_move_from_cr (cr_save_rtx
);
26879 /* Do any required saving of fpr's. If only one or two to save, do
26880 it ourselves. Otherwise, call function. */
26881 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
26883 int offset
= info
->fp_save_offset
+ frame_off
;
26884 for (int i
= info
->first_fp_reg_save
; i
< 64; i
++)
26887 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
26888 emit_frame_save (frame_reg_rtx
, fp_reg_mode
, i
, offset
,
26889 sp_off
- frame_off
);
26891 offset
+= fp_reg_size
;
26894 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
26896 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
26897 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
26898 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
26899 rtx ptr_reg
= frame_reg_rtx
;
26901 if (REGNO (frame_reg_rtx
) == ptr_regno
)
26902 gcc_checking_assert (frame_off
== 0);
26905 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26906 NOT_INUSE (ptr_regno
);
26907 emit_insn (gen_add3_insn (ptr_reg
,
26908 frame_reg_rtx
, GEN_INT (frame_off
)));
26910 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
26911 info
->fp_save_offset
,
26912 info
->lr_save_offset
,
26914 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
26915 NULL_RTX
, NULL_RTX
);
26920 /* Save GPRs. This is done as a PARALLEL if we are using
26921 the store-multiple instructions. */
26922 if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
26924 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
26925 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
26926 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
26927 rtx ptr_reg
= frame_reg_rtx
;
26928 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
26929 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
26932 if (ptr_regno
== 12)
26935 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
26937 /* Need to adjust r11 (r12) if we saved any FPRs. */
26938 if (end_save
+ frame_off
!= 0)
26940 rtx offset
= GEN_INT (end_save
+ frame_off
);
26943 frame_off
= -end_save
;
26945 NOT_INUSE (ptr_regno
);
26946 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
26948 else if (!ptr_set_up
)
26950 NOT_INUSE (ptr_regno
);
26951 emit_move_insn (ptr_reg
, frame_reg_rtx
);
26953 ptr_off
= -end_save
;
26954 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
26955 info
->gp_save_offset
+ ptr_off
,
26956 info
->lr_save_offset
+ ptr_off
,
26958 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
26959 NULL_RTX
, NULL_RTX
);
26963 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_MULTIPLE
))
26967 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
26968 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
26970 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
26972 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
26973 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
26974 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
26975 NULL_RTX
, NULL_RTX
);
26977 else if (!WORLD_SAVE_P (info
))
26979 int offset
= info
->gp_save_offset
+ frame_off
;
26980 for (int i
= info
->first_gp_reg_save
; i
< 32; i
++)
26983 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
26984 emit_frame_save (frame_reg_rtx
, reg_mode
, i
, offset
,
26985 sp_off
- frame_off
);
26987 offset
+= reg_size
;
26991 if (crtl
->calls_eh_return
)
26998 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
26999 if (regno
== INVALID_REGNUM
)
27003 p
= rtvec_alloc (i
);
27007 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
27008 if (regno
== INVALID_REGNUM
)
27012 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
27014 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
27015 RTVEC_ELT (p
, i
) = set
;
27016 RTX_FRAME_RELATED_P (set
) = 1;
27019 insn
= emit_insn (gen_blockage ());
27020 RTX_FRAME_RELATED_P (insn
) = 1;
27021 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
27024 /* In AIX ABI we need to make sure r2 is really saved. */
27025 if (TARGET_AIX
&& crtl
->calls_eh_return
)
27027 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
27028 rtx join_insn
, note
;
27029 rtx_insn
*save_insn
;
27030 long toc_restore_insn
;
27032 tmp_reg
= gen_rtx_REG (Pmode
, 11);
27033 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
27034 if (using_static_chain_p
)
27037 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
27041 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
27042 /* Peek at instruction to which this function returns. If it's
27043 restoring r2, then we know we've already saved r2. We can't
27044 unconditionally save r2 because the value we have will already
27045 be updated if we arrived at this function via a plt call or
27046 toc adjusting stub. */
27047 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
27048 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
27049 + RS6000_TOC_SAVE_SLOT
);
27050 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
27051 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
27052 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
27053 validate_condition_mode (EQ
, CCUNSmode
);
27054 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
27055 emit_insn (gen_rtx_SET (compare_result
,
27056 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
27057 toc_save_done
= gen_label_rtx ();
27058 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
27059 gen_rtx_EQ (VOIDmode
, compare_result
,
27061 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
27063 jump
= emit_jump_insn (gen_rtx_SET (pc_rtx
, jump
));
27064 JUMP_LABEL (jump
) = toc_save_done
;
27065 LABEL_NUSES (toc_save_done
) += 1;
27067 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
27068 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
27069 sp_off
- frame_off
);
27071 emit_label (toc_save_done
);
27073 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27074 have a CFG that has different saves along different paths.
27075 Move the note to a dummy blockage insn, which describes that
27076 R2 is unconditionally saved after the label. */
27077 /* ??? An alternate representation might be a special insn pattern
27078 containing both the branch and the store. That might let the
27079 code that minimizes the number of DW_CFA_advance opcodes better
27080 freedom in placing the annotations. */
27081 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
27083 remove_note (save_insn
, note
);
27085 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
27086 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
27087 RTX_FRAME_RELATED_P (save_insn
) = 0;
27089 join_insn
= emit_insn (gen_blockage ());
27090 REG_NOTES (join_insn
) = note
;
27091 RTX_FRAME_RELATED_P (join_insn
) = 1;
27093 if (using_static_chain_p
)
27095 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
27102 /* Save CR if we use any that must be preserved. */
27103 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
27105 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27106 GEN_INT (info
->cr_save_offset
+ frame_off
));
27107 rtx mem
= gen_frame_mem (SImode
, addr
);
27109 /* If we didn't copy cr before, do so now using r0. */
27110 if (cr_save_rtx
== NULL_RTX
)
27113 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
27114 rs6000_emit_move_from_cr (cr_save_rtx
);
27117 /* Saving CR requires a two-instruction sequence: one instruction
27118 to move the CR to a general-purpose register, and a second
27119 instruction that stores the GPR to memory.
27121 We do not emit any DWARF CFI records for the first of these,
27122 because we cannot properly represent the fact that CR is saved in
27123 a register. One reason is that we cannot express that multiple
27124 CR fields are saved; another reason is that on 64-bit, the size
27125 of the CR register in DWARF (4 bytes) differs from the size of
27126 a general-purpose register.
27128 This means if any intervening instruction were to clobber one of
27129 the call-saved CR fields, we'd have incorrect CFI. To prevent
27130 this from happening, we mark the store to memory as a use of
27131 those CR fields, which prevents any such instruction from being
27132 scheduled in between the two instructions. */
27137 crsave_v
[n_crsave
++] = gen_rtx_SET (mem
, cr_save_rtx
);
27138 for (i
= 0; i
< 8; i
++)
27139 if (save_reg_p (CR0_REGNO
+ i
))
27140 crsave_v
[n_crsave
++]
27141 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27143 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
,
27144 gen_rtvec_v (n_crsave
, crsave_v
)));
27145 END_USE (REGNO (cr_save_rtx
));
27147 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27148 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27149 so we need to construct a frame expression manually. */
27150 RTX_FRAME_RELATED_P (insn
) = 1;
27152 /* Update address to be stack-pointer relative, like
27153 rs6000_frame_related would do. */
27154 addr
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
27155 GEN_INT (info
->cr_save_offset
+ sp_off
));
27156 mem
= gen_frame_mem (SImode
, addr
);
27158 if (DEFAULT_ABI
== ABI_ELFv2
)
27160 /* In the ELFv2 ABI we generate separate CFI records for each
27161 CR field that was actually saved. They all point to the
27162 same 32-bit stack slot. */
27166 for (i
= 0; i
< 8; i
++)
27167 if (save_reg_p (CR0_REGNO
+ i
))
27170 = gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
27172 RTX_FRAME_RELATED_P (crframe
[n_crframe
]) = 1;
27176 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27177 gen_rtx_PARALLEL (VOIDmode
,
27178 gen_rtvec_v (n_crframe
, crframe
)));
27182 /* In other ABIs, by convention, we use a single CR regnum to
27183 represent the fact that all call-saved CR fields are saved.
27184 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27185 rtx set
= gen_rtx_SET (mem
, gen_rtx_REG (SImode
, CR2_REGNO
));
27186 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
27190 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27191 *separate* slots if the routine calls __builtin_eh_return, so
27192 that they can be independently restored by the unwinder. */
27193 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
27195 int i
, cr_off
= info
->ehcr_offset
;
27198 /* ??? We might get better performance by using multiple mfocrf
27200 crsave
= gen_rtx_REG (SImode
, 0);
27201 emit_insn (gen_movesi_from_cr (crsave
));
27203 for (i
= 0; i
< 8; i
++)
27204 if (!call_used_regs
[CR0_REGNO
+ i
])
27206 rtvec p
= rtvec_alloc (2);
27208 = gen_frame_store (crsave
, frame_reg_rtx
, cr_off
+ frame_off
);
27210 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
27212 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27214 RTX_FRAME_RELATED_P (insn
) = 1;
27215 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
27216 gen_frame_store (gen_rtx_REG (SImode
, CR0_REGNO
+ i
),
27217 sp_reg_rtx
, cr_off
+ sp_off
));
27219 cr_off
+= reg_size
;
27223 /* Update stack and set back pointer unless this is V.4,
27224 for which it was done previously. */
27225 if (!WORLD_SAVE_P (info
) && info
->push_p
27226 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
27228 rtx ptr_reg
= NULL
;
27231 /* If saving altivec regs we need to be able to address all save
27232 locations using a 16-bit offset. */
27233 if ((strategy
& SAVE_INLINE_VRS
) == 0
27234 || (info
->altivec_size
!= 0
27235 && (info
->altivec_save_offset
+ info
->altivec_size
- 16
27236 + info
->total_size
- frame_off
) > 32767)
27237 || (info
->vrsave_size
!= 0
27238 && (info
->vrsave_save_offset
27239 + info
->total_size
- frame_off
) > 32767))
27241 int sel
= SAVRES_SAVE
| SAVRES_VR
;
27242 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
27244 if (using_static_chain_p
27245 && ptr_regno
== STATIC_CHAIN_REGNUM
)
27247 if (REGNO (frame_reg_rtx
) != ptr_regno
)
27248 START_USE (ptr_regno
);
27249 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
27250 frame_reg_rtx
= ptr_reg
;
27251 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
27252 frame_off
= -ptr_off
;
27254 else if (REGNO (frame_reg_rtx
) == 1)
27255 frame_off
= info
->total_size
;
27256 sp_adjust
= rs6000_emit_allocate_stack (info
->total_size
,
27258 if (REGNO (frame_reg_rtx
) == 12)
27260 sp_off
= info
->total_size
;
27261 if (frame_reg_rtx
!= sp_reg_rtx
)
27262 rs6000_emit_stack_tie (frame_reg_rtx
, false);
27265 /* Set frame pointer, if needed. */
27266 if (frame_pointer_needed
)
27268 insn
= emit_move_insn (gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
),
27270 RTX_FRAME_RELATED_P (insn
) = 1;
27273 /* Save AltiVec registers if needed. Save here because the red zone does
27274 not always include AltiVec registers. */
27275 if (!WORLD_SAVE_P (info
)
27276 && info
->altivec_size
!= 0 && (strategy
& SAVE_INLINE_VRS
) == 0)
27278 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
27280 /* Oddly, the vector save/restore functions point r0 at the end
27281 of the save area, then use r11 or r12 to load offsets for
27282 [reg+reg] addressing. */
27283 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
27284 int scratch_regno
= ptr_regno_for_savres (SAVRES_SAVE
| SAVRES_VR
);
27285 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
27287 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
27289 if (scratch_regno
== 12)
27291 if (end_save
+ frame_off
!= 0)
27293 rtx offset
= GEN_INT (end_save
+ frame_off
);
27295 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
27298 emit_move_insn (ptr_reg
, frame_reg_rtx
);
27300 ptr_off
= -end_save
;
27301 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
27302 info
->altivec_save_offset
+ ptr_off
,
27303 0, V4SImode
, SAVRES_SAVE
| SAVRES_VR
);
27304 rs6000_frame_related (insn
, scratch_reg
, sp_off
- ptr_off
,
27305 NULL_RTX
, NULL_RTX
);
27306 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
27308 /* The oddity mentioned above clobbered our frame reg. */
27309 emit_move_insn (frame_reg_rtx
, ptr_reg
);
27310 frame_off
= ptr_off
;
27313 else if (!WORLD_SAVE_P (info
)
27314 && info
->altivec_size
!= 0)
27318 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
27319 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
27321 rtx areg
, savereg
, mem
;
27322 HOST_WIDE_INT offset
;
27324 offset
= (info
->altivec_save_offset
+ frame_off
27325 + 16 * (i
- info
->first_altivec_reg_save
));
27327 savereg
= gen_rtx_REG (V4SImode
, i
);
27329 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
27331 mem
= gen_frame_mem (V4SImode
,
27332 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
27333 GEN_INT (offset
)));
27334 insn
= emit_insn (gen_rtx_SET (mem
, savereg
));
27340 areg
= gen_rtx_REG (Pmode
, 0);
27341 emit_move_insn (areg
, GEN_INT (offset
));
27343 /* AltiVec addressing mode is [reg+reg]. */
27344 mem
= gen_frame_mem (V4SImode
,
27345 gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
));
27347 /* Rather than emitting a generic move, force use of the stvx
27348 instruction, which we always want on ISA 2.07 (power8) systems.
27349 In particular we don't want xxpermdi/stxvd2x for little
27351 insn
= emit_insn (gen_altivec_stvx_v4si_internal (mem
, savereg
));
27354 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
27355 areg
, GEN_INT (offset
));
27359 /* VRSAVE is a bit vector representing which AltiVec registers
27360 are used. The OS uses this to determine which vector
27361 registers to save on a context switch. We need to save
27362 VRSAVE on the stack frame, add whatever AltiVec registers we
27363 used in this function, and do the corresponding magic in the
27366 if (!WORLD_SAVE_P (info
) && info
->vrsave_size
!= 0)
27368 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27369 be using r12 as frame_reg_rtx and r11 as the static chain
27370 pointer for nested functions. */
27371 int save_regno
= 12;
27372 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
27373 && !using_static_chain_p
)
27375 else if (using_split_stack
|| REGNO (frame_reg_rtx
) == 12)
27378 if (using_static_chain_p
)
27381 NOT_INUSE (save_regno
);
27383 emit_vrsave_prologue (info
, save_regno
, frame_off
, frame_reg_rtx
);
27386 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27387 if (!TARGET_SINGLE_PIC_BASE
27388 && ((TARGET_TOC
&& TARGET_MINIMAL_TOC
27389 && !constant_pool_empty_p ())
27390 || (DEFAULT_ABI
== ABI_V4
27391 && (flag_pic
== 1 || (flag_pic
&& TARGET_SECURE_PLT
))
27392 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))))
27394 /* If emit_load_toc_table will use the link register, we need to save
27395 it. We use R12 for this purpose because emit_load_toc_table
27396 can use register 0. This allows us to use a plain 'blr' to return
27397 from the procedure more often. */
27398 int save_LR_around_toc_setup
= (TARGET_ELF
27399 && DEFAULT_ABI
== ABI_V4
27401 && ! info
->lr_save_p
27402 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
) > 0);
27403 if (save_LR_around_toc_setup
)
27405 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27406 rtx tmp
= gen_rtx_REG (Pmode
, 12);
27409 insn
= emit_move_insn (tmp
, lr
);
27410 RTX_FRAME_RELATED_P (insn
) = 1;
27412 rs6000_emit_load_toc_table (TRUE
);
27414 insn
= emit_move_insn (lr
, tmp
);
27415 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
27416 RTX_FRAME_RELATED_P (insn
) = 1;
27419 rs6000_emit_load_toc_table (TRUE
);
27423 if (!TARGET_SINGLE_PIC_BASE
27424 && DEFAULT_ABI
== ABI_DARWIN
27425 && flag_pic
&& crtl
->uses_pic_offset_table
)
27427 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
27428 rtx src
= gen_rtx_SYMBOL_REF (Pmode
, MACHOPIC_FUNCTION_BASE_NAME
);
27430 /* Save and restore LR locally around this call (in R0). */
27431 if (!info
->lr_save_p
)
27432 emit_move_insn (gen_rtx_REG (Pmode
, 0), lr
);
27434 emit_insn (gen_load_macho_picbase (src
));
27436 emit_move_insn (gen_rtx_REG (Pmode
,
27437 RS6000_PIC_OFFSET_TABLE_REGNUM
),
27440 if (!info
->lr_save_p
)
27441 emit_move_insn (lr
, gen_rtx_REG (Pmode
, 0));
27445 /* If we need to, save the TOC register after doing the stack setup.
27446 Do not emit eh frame info for this save. The unwinder wants info,
27447 conceptually attached to instructions in this function, about
27448 register values in the caller of this function. This R2 may have
27449 already been changed from the value in the caller.
27450 We don't attempt to write accurate DWARF EH frame info for R2
27451 because code emitted by gcc for a (non-pointer) function call
27452 doesn't save and restore R2. Instead, R2 is managed out-of-line
27453 by a linker generated plt call stub when the function resides in
27454 a shared library. This behavior is costly to describe in DWARF,
27455 both in terms of the size of DWARF info and the time taken in the
27456 unwinder to interpret it. R2 changes, apart from the
27457 calls_eh_return case earlier in this function, are handled by
27458 linux-unwind.h frob_update_context. */
27459 if (rs6000_save_toc_in_prologue_p ())
27461 rtx reg
= gen_rtx_REG (reg_mode
, TOC_REGNUM
);
27462 emit_insn (gen_frame_store (reg
, sp_reg_rtx
, RS6000_TOC_SAVE_SLOT
));
27465 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27466 if (using_split_stack
&& split_stack_arg_pointer_used_p ())
27467 emit_split_stack_prologue (info
, sp_adjust
, frame_off
, frame_reg_rtx
);
/* Output .extern statements for the save/restore routines we use.  */

static void
rs6000_output_savres_externs (FILE *file)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file)
{
  if (!cfun->is_thunk)
    rs6000_output_savres_externs (file);

  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (rs6000_global_entry_point_needed_p ())
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);

      if (TARGET_CMODEL != CMODEL_LARGE)
        {
          /* In the small and medium code models, we assume the TOC is less
             than 2 GB away from the text section, so it can be computed via
             the following two-instruction sequence.  */
          char buf[256];

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          fprintf (file, "0:\taddis 2,12,.TOC.-");
          assemble_name (file, buf);
          fprintf (file, "@ha\n");
          fprintf (file, "\taddi 2,2,.TOC.-");
          assemble_name (file, buf);
          fprintf (file, "@l\n");
        }
      else
        {
          /* In the large code model, we allow arbitrary offsets between the
             TOC and the text section, so we have to load the offset from
             memory.  The data field is emitted directly before the global
             entry point in rs6000_elf_declare_function_name.  */
          char buf[256];

#ifdef HAVE_AS_ENTRY_MARKERS
          /* If supported by the linker, emit a marker relocation.  If the
             total code size of the final executable or shared library
             happens to fit into 2 GB after all, the linker will replace
             this code sequence with the sequence for the small or medium
             code model.  */
          fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
#endif
          fprintf (file, "\tld 2,");
          ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
          assemble_name (file, buf);
          fprintf (file, "-");
          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          assemble_name (file, buf);
          fprintf (file, "(12)\n");
          fprintf (file, "\tadd 2,2,12\n");
        }

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }

  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL && crtl->profile)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
         the responsibility of _mcount to preserve the static chain
         register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
          && cfun->static_chain_decl != NULL)
        {
          asm_fprintf (file, "\tstd %s,24(%s)\n",
                       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
          fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
          asm_fprintf (file, "\tld %s,24(%s)\n",
                       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
        }
      else
        fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}
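/* For reference, a sketch (not part of the original source) of what the
   code above emits for the small/medium code models on ELFv2, assuming the
   internal label expands to .LCF0 and the function is named foo:

	.LCF0:
	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
		.localentry	foo,.-foo

   i.e. r2 is recomputed from the global entry address held in r12, and the
   .localentry directive records the offset of the local entry point.  */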
/* -mprofile-kernel code calls mcount before the function prolog,
   so a profiled leaf function should stay a leaf function.  */

static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          {
            rtvec r = rtvec_alloc (2);
            RTVEC_ELT (r, 0) = reg;
            RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
            RTVEC_ELT (p, ndx) =
              gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
                           gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
            ndx++;
          }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
         CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
        {
          for (i = 0; i < 8; i++)
            if (save_reg_p (CR0_REGNO + i))
              add_reg_note (insn, REG_CFA_RESTORE,
                            gen_rtx_REG (SImode, CR0_REGNO + i));

          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
        {
          rtx insn = emit_insn (gen_movsi_to_cr_one
                                 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

          /* For the ELFv2 ABI we generate a CFA_RESTORE for each
             CR field separately, attached to the insn that in fact
             restores this particular CR field.  */
          if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
            {
              add_reg_note (insn, REG_CFA_RESTORE,
                            gen_rtx_REG (SImode, CR0_REGNO + i));

              RTX_FRAME_RELATED_P (insn) = 1;
            }
        }

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
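/* Illustrative note (an assumption, not from the original source): the mask
   GEN_INT (1 << (7 - i)) built above selects CR field i in the usual mtcrf
   field encoding; e.g. restoring CR2 uses mask 1 << 5 = 0x20, which would
   correspond to an "mtcrf 32,reg" instruction.  */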
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
        if (save_reg_p (CR0_REGNO + i))
          {
            rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
            cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
                                           cfa_restores);
          }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                   gen_rtx_REG (SImode, CR2_REGNO),
                                   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
                                   gen_rtx_REG (Pmode, LR_REGNO),
                                   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static inline bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
                   ? 0
                   : TARGET_32BIT ? -220 : -288);
}
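/* Worked example (illustrative only): with the 64-bit AIX/ELFv2 red zone of
   288 bytes, offset_below_red_zone_p (-300) is true while
   offset_below_red_zone_p (-100) is false; under ABI_V4 every negative
   offset counts as below the (nonexistent) red zone.  */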
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn;
  rtx cr_save_reg = NULL_RTX;
  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
                             ? DFmode : SFmode;
  int fp_reg_size = 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  strategy = info->savres_strategy;
  using_load_multiple = strategy & REST_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
                         || rs6000_cpu == PROCESSOR_PPC603
                         || rs6000_cpu == PROCESSOR_PPC750
                         || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; We don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
                                                     ? info->lr_save_offset
                                                     : 0) > 32767
                                 || (cfun->calls_alloca
                                     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
                && (restoring_FPRs_inline
                    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
                && (restoring_GPRs_inline
                    || info->first_fp_reg_save < 64)
                && !cfun->machine->lr_is_wrapped_separately);

27857 if (WORLD_SAVE_P (info
))
27861 const char *alloc_rname
;
27864 /* eh_rest_world_r10 will return to the location saved in the LR
27865 stack slot (which is not likely to be our caller.)
27866 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27867 rest_world is similar, except any R10 parameter is ignored.
27868 The exception-handling stuff that was here in 2.95 is no
27869 longer necessary. */
27872 + 32 - info
->first_gp_reg_save
27873 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
27874 + 63 + 1 - info
->first_fp_reg_save
);
27876 strcpy (rname
, ((crtl
->calls_eh_return
) ?
27877 "*eh_rest_world_r10" : "*rest_world"));
27878 alloc_rname
= ggc_strdup (rname
);
27881 RTVEC_ELT (p
, j
++) = ret_rtx
;
27883 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
27884 /* The instruction pattern requires a clobber here;
27885 it is shared with the restVEC helper. */
27887 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
27890 /* CR register traditionally saved as CR2. */
27891 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
27893 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
27894 if (flag_shrink_wrap
)
27896 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
27897 gen_rtx_REG (Pmode
, LR_REGNO
),
27899 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27903 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
27905 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
27907 = gen_frame_load (reg
,
27908 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
27909 if (flag_shrink_wrap
27910 && save_reg_p (info
->first_gp_reg_save
+ i
))
27911 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27913 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
27915 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
27917 = gen_frame_load (reg
,
27918 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
27919 if (flag_shrink_wrap
27920 && save_reg_p (info
->first_altivec_reg_save
+ i
))
27921 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27923 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
27925 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
27926 ? DFmode
: SFmode
),
27927 info
->first_fp_reg_save
+ i
);
27929 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
27930 if (flag_shrink_wrap
27931 && save_reg_p (info
->first_fp_reg_save
+ i
))
27932 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
27935 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
27937 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
27939 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
27941 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
27943 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
27944 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
27946 if (flag_shrink_wrap
)
27948 REG_NOTES (insn
) = cfa_restores
;
27949 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
27950 RTX_FRAME_RELATED_P (insn
) = 1;
27955 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27957 frame_off
= info
->total_size
;
27959 /* Restore AltiVec registers if we must do so before adjusting the
27961 if (info
->altivec_size
!= 0
27962 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27963 || (DEFAULT_ABI
!= ABI_V4
27964 && offset_below_red_zone_p (info
->altivec_save_offset
))))
27967 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
27969 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
27970 if (use_backchain_to_restore_sp
)
27972 int frame_regno
= 11;
27974 if ((strategy
& REST_INLINE_VRS
) == 0)
27976 /* Of r11 and r12, select the one not clobbered by an
27977 out-of-line restore function for the frame register. */
27978 frame_regno
= 11 + 12 - scratch_regno
;
27980 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
27981 emit_move_insn (frame_reg_rtx
,
27982 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
27985 else if (frame_pointer_needed
)
27986 frame_reg_rtx
= hard_frame_pointer_rtx
;
27988 if ((strategy
& REST_INLINE_VRS
) == 0)
27990 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
27992 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
27993 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
27995 if (end_save
+ frame_off
!= 0)
27997 rtx offset
= GEN_INT (end_save
+ frame_off
);
27999 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28002 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28004 ptr_off
= -end_save
;
28005 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28006 info
->altivec_save_offset
+ ptr_off
,
28007 0, V4SImode
, SAVRES_VR
);
28011 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28012 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28014 rtx addr
, areg
, mem
, insn
;
28015 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28016 HOST_WIDE_INT offset
28017 = (info
->altivec_save_offset
+ frame_off
28018 + 16 * (i
- info
->first_altivec_reg_save
));
28020 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28022 mem
= gen_frame_mem (V4SImode
,
28023 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28024 GEN_INT (offset
)));
28025 insn
= gen_rtx_SET (reg
, mem
);
28029 areg
= gen_rtx_REG (Pmode
, 0);
28030 emit_move_insn (areg
, GEN_INT (offset
));
28032 /* AltiVec addressing mode is [reg+reg]. */
28033 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28034 mem
= gen_frame_mem (V4SImode
, addr
);
28036 /* Rather than emitting a generic move, force use of the
28037 lvx instruction, which we always want. In particular we
28038 don't want lxvd2x/xxpermdi for little endian. */
28039 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28042 (void) emit_insn (insn
);
28046 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28047 if (((strategy
& REST_INLINE_VRS
) == 0
28048 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28049 && (flag_shrink_wrap
28050 || (offset_below_red_zone_p
28051 (info
->altivec_save_offset
28052 + 16 * (i
- info
->first_altivec_reg_save
))))
28055 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28056 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28060 /* Restore VRSAVE if we must do so before adjusting the stack. */
28061 if (info
->vrsave_size
!= 0
28062 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28063 || (DEFAULT_ABI
!= ABI_V4
28064 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
28068 if (frame_reg_rtx
== sp_reg_rtx
)
28070 if (use_backchain_to_restore_sp
)
28072 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28073 emit_move_insn (frame_reg_rtx
,
28074 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28077 else if (frame_pointer_needed
)
28078 frame_reg_rtx
= hard_frame_pointer_rtx
;
28081 reg
= gen_rtx_REG (SImode
, 12);
28082 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28083 info
->vrsave_save_offset
+ frame_off
));
28085 emit_insn (generate_set_vrsave (reg
, info
, 1));
28089 /* If we have a large stack frame, restore the old stack pointer
28090 using the backchain. */
28091 if (use_backchain_to_restore_sp
)
28093 if (frame_reg_rtx
== sp_reg_rtx
)
28095 /* Under V.4, don't reset the stack pointer until after we're done
28096 loading the saved registers. */
28097 if (DEFAULT_ABI
== ABI_V4
)
28098 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28100 insn
= emit_move_insn (frame_reg_rtx
,
28101 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
28104 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28105 && DEFAULT_ABI
== ABI_V4
)
28106 /* frame_reg_rtx has been set up by the altivec restore. */
28110 insn
= emit_move_insn (sp_reg_rtx
, frame_reg_rtx
);
28111 frame_reg_rtx
= sp_reg_rtx
;
28114 /* If we have a frame pointer, we can restore the old stack pointer
28116 else if (frame_pointer_needed
)
28118 frame_reg_rtx
= sp_reg_rtx
;
28119 if (DEFAULT_ABI
== ABI_V4
)
28120 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
28121 /* Prevent reordering memory accesses against stack pointer restore. */
28122 else if (cfun
->calls_alloca
28123 || offset_below_red_zone_p (-info
->total_size
))
28124 rs6000_emit_stack_tie (frame_reg_rtx
, true);
28126 insn
= emit_insn (gen_add3_insn (frame_reg_rtx
, hard_frame_pointer_rtx
,
28127 GEN_INT (info
->total_size
)));
28130 else if (info
->push_p
28131 && DEFAULT_ABI
!= ABI_V4
28132 && !crtl
->calls_eh_return
)
28134 /* Prevent reordering memory accesses against stack pointer restore. */
28135 if (cfun
->calls_alloca
28136 || offset_below_red_zone_p (-info
->total_size
))
28137 rs6000_emit_stack_tie (frame_reg_rtx
, false);
28138 insn
= emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
,
28139 GEN_INT (info
->total_size
)));
28142 if (insn
&& frame_reg_rtx
== sp_reg_rtx
)
28146 REG_NOTES (insn
) = cfa_restores
;
28147 cfa_restores
= NULL_RTX
;
28149 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28150 RTX_FRAME_RELATED_P (insn
) = 1;
28153 /* Restore AltiVec registers if we have not done so already. */
28154 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28155 && info
->altivec_size
!= 0
28156 && (DEFAULT_ABI
== ABI_V4
28157 || !offset_below_red_zone_p (info
->altivec_save_offset
)))
28161 if ((strategy
& REST_INLINE_VRS
) == 0)
28163 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
28165 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
28166 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
28167 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
28169 if (end_save
+ frame_off
!= 0)
28171 rtx offset
= GEN_INT (end_save
+ frame_off
);
28173 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
28176 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28178 ptr_off
= -end_save
;
28179 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
28180 info
->altivec_save_offset
+ ptr_off
,
28181 0, V4SImode
, SAVRES_VR
);
28182 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
28184 /* Frame reg was clobbered by out-of-line save. Restore it
28185 from ptr_reg, and if we are calling out-of-line gpr or
28186 fpr restore set up the correct pointer and offset. */
28187 unsigned newptr_regno
= 1;
28188 if (!restoring_GPRs_inline
)
28190 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28191 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28192 newptr_regno
= ptr_regno_for_savres (sel
);
28193 end_save
= info
->gp_save_offset
+ info
->gp_size
;
28195 else if (!restoring_FPRs_inline
)
28197 bool lr
= !(strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
);
28198 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28199 newptr_regno
= ptr_regno_for_savres (sel
);
28200 end_save
= info
->fp_save_offset
+ info
->fp_size
;
28203 if (newptr_regno
!= 1 && REGNO (frame_reg_rtx
) != newptr_regno
)
28204 frame_reg_rtx
= gen_rtx_REG (Pmode
, newptr_regno
);
28206 if (end_save
+ ptr_off
!= 0)
28208 rtx offset
= GEN_INT (end_save
+ ptr_off
);
28210 frame_off
= -end_save
;
28212 emit_insn (gen_addsi3_carry (frame_reg_rtx
,
28215 emit_insn (gen_adddi3_carry (frame_reg_rtx
,
28220 frame_off
= ptr_off
;
28221 emit_move_insn (frame_reg_rtx
, ptr_reg
);
28227 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28228 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
28230 rtx addr
, areg
, mem
, insn
;
28231 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28232 HOST_WIDE_INT offset
28233 = (info
->altivec_save_offset
+ frame_off
28234 + 16 * (i
- info
->first_altivec_reg_save
));
28236 if (TARGET_P9_VECTOR
&& quad_address_offset_p (offset
))
28238 mem
= gen_frame_mem (V4SImode
,
28239 gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
28240 GEN_INT (offset
)));
28241 insn
= gen_rtx_SET (reg
, mem
);
28245 areg
= gen_rtx_REG (Pmode
, 0);
28246 emit_move_insn (areg
, GEN_INT (offset
));
28248 /* AltiVec addressing mode is [reg+reg]. */
28249 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
28250 mem
= gen_frame_mem (V4SImode
, addr
);
28252 /* Rather than emitting a generic move, force use of the
28253 lvx instruction, which we always want. In particular we
28254 don't want lxvd2x/xxpermdi for little endian. */
28255 insn
= gen_altivec_lvx_v4si_internal (reg
, mem
);
28258 (void) emit_insn (insn
);
28262 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
28263 if (((strategy
& REST_INLINE_VRS
) == 0
28264 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
28265 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28268 rtx reg
= gen_rtx_REG (V4SImode
, i
);
28269 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28273 /* Restore VRSAVE if we have not done so already. */
28274 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28275 && info
->vrsave_size
!= 0
28276 && (DEFAULT_ABI
== ABI_V4
28277 || !offset_below_red_zone_p (info
->vrsave_save_offset
)))
28281 reg
= gen_rtx_REG (SImode
, 12);
28282 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28283 info
->vrsave_save_offset
+ frame_off
));
28285 emit_insn (generate_set_vrsave (reg
, info
, 1));
28288 /* If we exit by an out-of-line restore function on ABI_V4 then that
28289 function will deallocate the stack, so we don't need to worry
28290 about the unwinder restoring cr from an invalid stack frame
28292 exit_func
= (!restoring_FPRs_inline
28293 || (!restoring_GPRs_inline
28294 && info
->first_fp_reg_save
== 64));
28296 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28297 *separate* slots if the routine calls __builtin_eh_return, so
28298 that they can be independently restored by the unwinder. */
28299 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
28301 int i
, cr_off
= info
->ehcr_offset
;
28303 for (i
= 0; i
< 8; i
++)
28304 if (!call_used_regs
[CR0_REGNO
+ i
])
28306 rtx reg
= gen_rtx_REG (SImode
, 0);
28307 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28308 cr_off
+ frame_off
));
28310 insn
= emit_insn (gen_movsi_to_cr_one
28311 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
28313 if (!exit_func
&& flag_shrink_wrap
)
28315 add_reg_note (insn
, REG_CFA_RESTORE
,
28316 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
28318 RTX_FRAME_RELATED_P (insn
) = 1;
28321 cr_off
+= reg_size
;
28325 /* Get the old lr if we saved it. If we are restoring registers
28326 out-of-line, then the out-of-line routines can do this for us. */
28327 if (restore_lr
&& restoring_GPRs_inline
)
28328 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28330 /* Get the old cr if we saved it. */
28331 if (info
->cr_save_p
)
28333 unsigned cr_save_regno
= 12;
28335 if (!restoring_GPRs_inline
)
28337 /* Ensure we don't use the register used by the out-of-line
28338 gpr register restore below. */
28339 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
28340 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
28341 int gpr_ptr_regno
= ptr_regno_for_savres (sel
);
28343 if (gpr_ptr_regno
== 12)
28344 cr_save_regno
= 11;
28345 gcc_checking_assert (REGNO (frame_reg_rtx
) != cr_save_regno
);
28347 else if (REGNO (frame_reg_rtx
) == 12)
28348 cr_save_regno
= 11;
28350 cr_save_reg
= load_cr_save (cr_save_regno
, frame_reg_rtx
,
28351 info
->cr_save_offset
+ frame_off
,
28355 /* Set LR here to try to overlap restores below. */
28356 if (restore_lr
&& restoring_GPRs_inline
)
28357 restore_saved_lr (0, exit_func
);
28359 /* Load exception handler data registers, if needed. */
28360 if (crtl
->calls_eh_return
)
28362 unsigned int i
, regno
;
28366 rtx reg
= gen_rtx_REG (reg_mode
, 2);
28367 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
28368 frame_off
+ RS6000_TOC_SAVE_SLOT
));
28375 regno
= EH_RETURN_DATA_REGNO (i
);
28376 if (regno
== INVALID_REGNUM
)
28379 mem
= gen_frame_mem_offset (reg_mode
, frame_reg_rtx
,
28380 info
->ehrd_offset
+ frame_off
28381 + reg_size
* (int) i
);
28383 emit_move_insn (gen_rtx_REG (reg_mode
, regno
), mem
);
28387 /* Restore GPRs. This is done as a PARALLEL if we are using
28388 the load-multiple instructions. */
28389 if (!restoring_GPRs_inline
)
28391 /* We are jumping to an out-of-line function. */
28393 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
28394 bool can_use_exit
= end_save
== 0;
28395 int sel
= SAVRES_GPR
| (can_use_exit
? SAVRES_LR
: 0);
28398 /* Emit stack reset code if we need it. */
28399 ptr_regno
= ptr_regno_for_savres (sel
);
28400 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
28402 rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28403 else if (end_save
+ frame_off
!= 0)
28404 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
,
28405 GEN_INT (end_save
+ frame_off
)));
28406 else if (REGNO (frame_reg_rtx
) != ptr_regno
)
28407 emit_move_insn (ptr_reg
, frame_reg_rtx
);
28408 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28409 frame_off
= -end_save
;
28411 if (can_use_exit
&& info
->cr_save_p
)
28412 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, true);
28414 ptr_off
= -end_save
;
28415 rs6000_emit_savres_rtx (info
, ptr_reg
,
28416 info
->gp_save_offset
+ ptr_off
,
28417 info
->lr_save_offset
+ ptr_off
,
28420 else if (using_load_multiple
)
28423 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
28424 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
28426 = gen_frame_load (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
28428 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
28429 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28433 int offset
= info
->gp_save_offset
+ frame_off
;
28434 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28437 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28439 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28440 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28443 offset
+= reg_size
;
28447 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28449 /* If the frame pointer was used then we can't delay emitting
28450 a REG_CFA_DEF_CFA note. This must happen on the insn that
28451 restores the frame pointer, r31. We may have already emitted
28452 a REG_CFA_DEF_CFA note, but that's OK; A duplicate is
28453 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28454 be harmless if emitted. */
28455 if (frame_pointer_needed
)
28457 insn
= get_last_insn ();
28458 add_reg_note (insn
, REG_CFA_DEF_CFA
,
28459 plus_constant (Pmode
, frame_reg_rtx
, frame_off
));
28460 RTX_FRAME_RELATED_P (insn
) = 1;
28463 /* Set up cfa_restores. We always need these when
28464 shrink-wrapping. If not shrink-wrapping then we only need
28465 the cfa_restore when the stack location is no longer valid.
28466 The cfa_restores must be emitted on or before the insn that
28467 invalidates the stack, and of course must not be emitted
28468 before the insn that actually does the restore. The latter
28469 is why it is a bad idea to emit the cfa_restores as a group
28470 on the last instruction here that actually does a restore:
28471 That insn may be reordered with respect to others doing
28473 if (flag_shrink_wrap
28474 && !restoring_GPRs_inline
28475 && info
->first_fp_reg_save
== 64)
28476 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28478 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
28480 && !cfun
->machine
->gpr_is_wrapped_separately
[i
])
28482 rtx reg
= gen_rtx_REG (reg_mode
, i
);
28483 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28487 if (!restoring_GPRs_inline
28488 && info
->first_fp_reg_save
== 64)
28490 /* We are jumping to an out-of-line function. */
28492 emit_cfa_restores (cfa_restores
);
28496 if (restore_lr
&& !restoring_GPRs_inline
)
28498 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
28499 restore_saved_lr (0, exit_func
);
28502 /* Restore fpr's if we need to do it without calling a function. */
28503 if (restoring_FPRs_inline
)
28505 int offset
= info
->fp_save_offset
+ frame_off
;
28506 for (i
= info
->first_fp_reg_save
; i
< 64; i
++)
28509 && !cfun
->machine
->fpr_is_wrapped_separately
[i
- 32])
28511 rtx reg
= gen_rtx_REG (fp_reg_mode
, i
);
28512 emit_insn (gen_frame_load (reg
, frame_reg_rtx
, offset
));
28513 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
28514 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
28518 offset
+= fp_reg_size
;
28522 /* If we saved cr, restore it here. Just those that were used. */
28523 if (info
->cr_save_p
)
28524 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, exit_func
);
28526 /* If this is V.4, unwind the stack pointer after all of the loads
28527 have been done, or set up r11 if we are restoring fp out of line. */
28529 if (!restoring_FPRs_inline
)
28531 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28532 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
28533 ptr_regno
= ptr_regno_for_savres (sel
);
28536 insn
= rs6000_emit_stack_reset (frame_reg_rtx
, frame_off
, ptr_regno
);
28537 if (REGNO (frame_reg_rtx
) == ptr_regno
)
28540 if (insn
&& restoring_FPRs_inline
)
28544 REG_NOTES (insn
) = cfa_restores
;
28545 cfa_restores
= NULL_RTX
;
28547 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
28548 RTX_FRAME_RELATED_P (insn
) = 1;
28551 if (crtl
->calls_eh_return
)
28553 rtx sa
= EH_RETURN_STACKADJ_RTX
;
28554 emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
, sa
));
28557 if (!sibcall
&& restoring_FPRs_inline
)
28561 /* We can't hang the cfa_restores off a simple return,
28562 since the shrink-wrap code sometimes uses an existing
28563 return. This means there might be a path from
28564 pre-prologue code to this return, and dwarf2cfi code
28565 wants the eh_frame unwinder state to be the same on
28566 all paths to any point. So we need to emit the
28567 cfa_restores before the return. For -m64 we really
28568 don't need epilogue cfa_restores at all, except for
28569 this irritating dwarf2cfi with shrink-wrap
28570 requirement; The stack red-zone means eh_frame info
28571 from the prologue telling the unwinder to restore
28572 from the stack is perfectly good right to the end of
28574 emit_insn (gen_blockage ());
28575 emit_cfa_restores (cfa_restores
);
28576 cfa_restores
= NULL_RTX
;
28579 emit_jump_insn (targetm
.gen_simple_return ());
28582 if (!sibcall
&& !restoring_FPRs_inline
)
28584 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
28585 rtvec p
= rtvec_alloc (3 + !!lr
+ 64 - info
->first_fp_reg_save
);
28587 RTVEC_ELT (p
, elt
++) = ret_rtx
;
28589 RTVEC_ELT (p
, elt
++)
28590 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
28592 /* We have to restore more than two FP registers, so branch to the
28593 restore function. It will return to our caller. */
28598 if (flag_shrink_wrap
)
28599 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
28601 sym
= rs6000_savres_routine_sym (info
, SAVRES_FPR
| (lr
? SAVRES_LR
: 0));
28602 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, sym
);
28603 reg
= (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)? 1 : 11;
28604 RTVEC_ELT (p
, elt
++) = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, reg
));
28606 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
28608 rtx reg
= gen_rtx_REG (DFmode
, info
->first_fp_reg_save
+ i
);
28610 RTVEC_ELT (p
, elt
++)
28611 = gen_frame_load (reg
, sp_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
28612 if (flag_shrink_wrap
28613 && save_reg_p (info
->first_fp_reg_save
+ i
))
28614 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
28617 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
28623 /* Ensure the cfa_restores are hung off an insn that won't
28624 be reordered above other restores. */
28625 emit_insn (gen_blockage ());
28627 emit_cfa_restores (cfa_restores
);
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file)
{
#if TARGET_MACHO
  macho_branch_islands ();

  {
    rtx_insn *insn = get_last_insn ();
    rtx_insn *deleted_debug_label = NULL;

    /* Mach-O doesn't support labels at the end of objects, so if
       it looks like we might want one, take special action.

       First, collect any sequence of deleted debug labels.  */
    while (insn
           && NOTE_P (insn)
           && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
        /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
           notes only, instead set their CODE_LABEL_NUMBER to -1,
           otherwise there would be code generation differences
           in between -g and -g0.  */
        if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
          deleted_debug_label = insn;
        insn = PREV_INSN (insn);
      }

    /* Second, if we have:
       label:
         barrier
       then this needs to be detected, so skip past the barrier.  */

    if (insn && BARRIER_P (insn))
      insn = PREV_INSN (insn);

    /* Up to now we've only seen notes or barriers.  */
    if (insn)
      {
        if (LABEL_P (insn)
            || (NOTE_P (insn)
                && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
          /* Trailing label: <barrier>.  */
          fputs ("\tnop\n", file);
        else
          {
            /* Lastly, see if we have a completely empty function body.  */
            while (insn && ! INSN_P (insn))
              insn = PREV_INSN (insn);
            /* If we don't find any insns, we've got an empty function body;
               I.e. completely empty - without a return or branch.  This is
               taken as the case where a function body has been removed
               because it contains an inline __builtin_unreachable().  GCC
               states that reaching __builtin_unreachable() means UB so we're
               not obliged to do anything special; however, we want
               non-zero-sized function bodies.  To meet this, and help the
               user out, let's trap the case.  */
            if (insn == NULL)
              fputs ("\ttrap\n", file);
          }
      }
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
        if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
          CODE_LABEL_NUMBER (insn) = -1;
  }
#endif

28700 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28703 We don't output a traceback table if -finhibit-size-directive was
28704 used. The documentation for -finhibit-size-directive reads
28705 ``don't output a @code{.size} assembler directive, or anything
28706 else that would cause trouble if the function is split in the
28707 middle, and the two halves are placed at locations far apart in
28708 memory.'' The traceback table has this property, since it
28709 includes the offset from the start of the function to the
28710 traceback table itself.
28712 System V.4 Powerpc's (and the embedded ABI derived from it) use a
28713 different traceback table. */
28714 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
28715 && ! flag_inhibit_size_directive
28716 && rs6000_traceback
!= traceback_none
&& !cfun
->is_thunk
)
28718 const char *fname
= NULL
;
28719 const char *language_string
= lang_hooks
.name
;
28720 int fixed_parms
= 0, float_parms
= 0, parm_info
= 0;
28722 int optional_tbtab
;
28723 rs6000_stack_t
*info
= rs6000_stack_info ();
28725 if (rs6000_traceback
== traceback_full
)
28726 optional_tbtab
= 1;
28727 else if (rs6000_traceback
== traceback_part
)
28728 optional_tbtab
= 0;
28730 optional_tbtab
= !optimize_size
&& !TARGET_ELF
;
28732 if (optional_tbtab
)
28734 fname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
28735 while (*fname
== '.') /* V.4 encodes . in the name */
28738 /* Need label immediately before tbtab, so we can compute
28739 its offset from the function start. */
28740 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
28741 ASM_OUTPUT_LABEL (file
, fname
);
28744 /* The .tbtab pseudo-op can only be used for the first eight
28745 expressions, since it can't handle the possibly variable
28746 length fields that follow. However, if you omit the optional
28747 fields, the assembler outputs zeros for all optional fields
28748 anyways, giving each variable length field is minimum length
28749 (as defined in sys/debug.h). Thus we can not use the .tbtab
28750 pseudo-op at all. */
28752 /* An all-zero word flags the start of the tbtab, for debuggers
28753 that have to find it by searching forward from the entry
28754 point or from the current pc. */
28755 fputs ("\t.long 0\n", file
);
28757 /* Tbtab format type. Use format type 0. */
28758 fputs ("\t.byte 0,", file
);
28760 /* Language type. Unfortunately, there does not seem to be any
28761 official way to discover the language being compiled, so we
28762 use language_string.
28763 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28764 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28765 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28766 either, so for now use 0. */
28768 || ! strcmp (language_string
, "GNU GIMPLE")
28769 || ! strcmp (language_string
, "GNU Go")
28770 || ! strcmp (language_string
, "libgccjit"))
28772 else if (! strcmp (language_string
, "GNU F77")
28773 || lang_GNU_Fortran ())
28775 else if (! strcmp (language_string
, "GNU Pascal"))
28777 else if (! strcmp (language_string
, "GNU Ada"))
28779 else if (lang_GNU_CXX ()
28780 || ! strcmp (language_string
, "GNU Objective-C++"))
28782 else if (! strcmp (language_string
, "GNU Java"))
28784 else if (! strcmp (language_string
, "GNU Objective-C"))
28787 gcc_unreachable ();
28788 fprintf (file
, "%d,", i
);
28790 /* 8 single bit fields: global linkage (not set for C extern linkage,
28791 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28792 from start of procedure stored in tbtab, internal function, function
28793 has controlled storage, function has no toc, function uses fp,
28794 function logs/aborts fp operations. */
28795 /* Assume that fp operations are used if any fp reg must be saved. */
28796 fprintf (file
, "%d,",
28797 (optional_tbtab
<< 5) | ((info
->first_fp_reg_save
!= 64) << 1));
28799 /* 6 bitfields: function is interrupt handler, name present in
28800 proc table, function calls alloca, on condition directives
28801 (controls stack walks, 3 bits), saves condition reg, saves
28803 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28804 set up as a frame pointer, even when there is no alloca call. */
28805 fprintf (file
, "%d,",
28806 ((optional_tbtab
<< 6)
28807 | ((optional_tbtab
& frame_pointer_needed
) << 5)
28808 | (info
->cr_save_p
<< 1)
28809 | (info
->lr_save_p
)));
28811 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28813 fprintf (file
, "%d,",
28814 (info
->push_p
<< 7) | (64 - info
->first_fp_reg_save
));
28816 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28817 fprintf (file
, "%d,", (32 - first_reg_to_save ()));
28819 if (optional_tbtab
)
28821 /* Compute the parameter info from the function decl argument
28824 int next_parm_info_bit
= 31;
28826 for (decl
= DECL_ARGUMENTS (current_function_decl
);
28827 decl
; decl
= DECL_CHAIN (decl
))
28829 rtx parameter
= DECL_INCOMING_RTL (decl
);
28830 machine_mode mode
= GET_MODE (parameter
);
28832 if (GET_CODE (parameter
) == REG
)
28834 if (SCALAR_FLOAT_MODE_P (mode
))
28857 gcc_unreachable ();
28860 /* If only one bit will fit, don't or in this entry. */
28861 if (next_parm_info_bit
> 0)
28862 parm_info
|= (bits
<< (next_parm_info_bit
- 1));
28863 next_parm_info_bit
-= 2;
28867 fixed_parms
+= ((GET_MODE_SIZE (mode
)
28868 + (UNITS_PER_WORD
- 1))
28870 next_parm_info_bit
-= 1;
28876 /* Number of fixed point parameters. */
28877 /* This is actually the number of words of fixed point parameters; thus
28878 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28879 fprintf (file
, "%d,", fixed_parms
);
28881 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28883 /* This is actually the number of fp registers that hold parameters;
28884 and thus the maximum value is 13. */
28885 /* Set parameters on stack bit if parameters are not in their original
28886 registers, regardless of whether they are on the stack? Xlc
28887 seems to set the bit when not optimizing. */
28888 fprintf (file
, "%d\n", ((float_parms
<< 1) | (! optimize
)));
28890 if (optional_tbtab
)
28892 /* Optional fields follow. Some are variable length. */
28894 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28895 float, 11 double float. */
28896 /* There is an entry for each parameter in a register, in the order
28897 that they occur in the parameter list. Any intervening arguments
28898 on the stack are ignored. If the list overflows a long (max
28899 possible length 34 bits) then completely leave off all elements
28901 /* Only emit this long if there was at least one parameter. */
28902 if (fixed_parms
|| float_parms
)
28903 fprintf (file
, "\t.long %d\n", parm_info
);
28905 /* Offset from start of code to tb table. */
28906 fputs ("\t.long ", file
);
28907 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
28908 RS6000_OUTPUT_BASENAME (file
, fname
);
28910 rs6000_output_function_entry (file
, fname
);
28913 /* Interrupt handler mask. */
28914 /* Omit this long, since we never set the interrupt handler bit
28917 /* Number of CTL (controlled storage) anchors. */
28918 /* Omit this long, since the has_ctl bit is never set above. */
28920 /* Displacement into stack of each CTL anchor. */
28921 /* Omit this list of longs, because there are no CTL anchors. */
28923 /* Length of function name. */
28926 fprintf (file
, "\t.short %d\n", (int) strlen (fname
));
28928 /* Function name. */
28929 assemble_string (fname
, strlen (fname
));
28931 /* Register for alloca automatic storage; this is always reg 31.
28932 Only emit this if the alloca bit was set above. */
28933 if (frame_pointer_needed
)
28934 fputs ("\t.byte 31\n", file
);
28936 fputs ("\t.align 2\n", file
);
  /* Arrange to define .LCTOC1 label, if not already done.  */
  if (need_toc_init)
    {
      need_toc_init = 0;
      if (!toc_initialized)
        {
          switch_to_section (toc_section);
          switch_to_section (current_function_section ());
        }
    }
}
/* -fsplit-stack support.  */

/* A SYMBOL_REF for __morestack.  */
static GTY(()) rtx morestack_ref;

static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
/* Emit -fsplit-stack prologue, which goes before the regular function
   prologue (at local entry point in the case of ELFv2).  */

void
rs6000_expand_split_stack_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  unsigned HOST_WIDE_INT allocate;
  long alloc_hi, alloc_lo;
  rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
  rtx_insn *insn;

  gcc_assert (flag_split_stack && reload_completed);

  if (!info->push_p)
    return;

  if (global_regs[29])
    {
      error ("%qs uses register r29", "-fsplit-stack");
      inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
              "conflicts with %qD", global_regs_decl[29]);
    }

  allocate = info->total_size;
  if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
    {
      sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
      return;
    }
  if (morestack_ref == NULL_RTX)
    {
      morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
      SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
                                           | SYMBOL_FLAG_FUNCTION);
    }

  r0 = gen_rtx_REG (Pmode, 0);
  r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  r12 = gen_rtx_REG (Pmode, 12);
  emit_insn (gen_load_split_stack_limit (r0));
  /* Always emit two insns here to calculate the requested stack,
     so that the linker can edit them when adjusting size for calling
     non-split-stack code.  */
  alloc_hi = (-allocate + 0x8000) & ~0xffffL;
  alloc_lo = -allocate - alloc_hi;
  if (alloc_hi != 0)
    {
      emit_insn (gen_add3_const (r12, r1, alloc_hi));
      if (alloc_lo != 0)
        emit_insn (gen_add3_const (r12, r12, alloc_lo));
      else
        emit_insn (gen_nop ());
    }
  else
    {
      emit_insn (gen_add3_const (r12, r1, alloc_lo));
      emit_insn (gen_nop ());
    }

  compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
  emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
  ok_label = gen_label_rtx ();
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
                               gen_rtx_GEU (VOIDmode, compare, const0_rtx),
                               gen_rtx_LABEL_REF (VOIDmode, ok_label),
                               pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (insn) = ok_label;
  /* Mark the jump as very likely to be taken.  */
  add_reg_br_prob_note (insn, profile_probability::very_likely ());

  lr = gen_rtx_REG (Pmode, LR_REGNO);
  insn = emit_move_insn (r0, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
  RTX_FRAME_RELATED_P (insn) = 1;

  insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
                                   const0_rtx, const0_rtx));
  call_fusage = NULL_RTX;
  use_reg (&call_fusage, r12);
  /* Say the call uses r0, even though it doesn't, to stop regrename
     from twiddling with the insns saving lr, trashing args for cfun.
     The insns restoring lr are similarly protected by making
     split_stack_return use r0.  */
  use_reg (&call_fusage, r0);
  add_function_usage_to (insn, call_fusage);
  /* Indicate that this function can't jump to non-local gotos.  */
  make_reg_eh_region_note_nothrow_nononlocal (insn);
  emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
  insn = emit_move_insn (lr, r0);
  add_reg_note (insn, REG_CFA_RESTORE, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  emit_insn (gen_split_stack_return ());

  emit_label (ok_label);
  LABEL_NUSES (ok_label) = 1;
}
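/* Rough sketch (not from the original source; register numbers and the LR
   save offset are illustrative) of the sequence emitted above for a 64-bit
   frame of size N:

	ld 0,<split-stack limit>	# gen_load_split_stack_limit
	addis 12,1,-hi(N)		# always two insns so the linker can
	addi 12,12,-lo(N)		#   rewrite them for non-split callees
	cmpld 7,12,0
	bge 7,0f			# enough stack: skip __morestack
	mflr 0
	std 0,<lr_save_offset>(1)
	bl __morestack
	ld 0,<lr_save_offset>(1)
	mtlr 0
	<split_stack_return>
	0:  */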
/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
     && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
         == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
        {
          rtx pat;

          cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
          REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

          /* Put the pseudo initialization right after the note at the
             beginning of the function.  */
          pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
                             gen_rtx_REG (Pmode, 12));
          push_topmost_sequence ();
          emit_insn_after (pat, get_insns ());
          pop_topmost_sequence ();
        }
      return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
                            FIRST_PARM_OFFSET (current_function_decl));
    }
  return virtual_incoming_args_rtx;
}
/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */

void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
                               gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
                               gen_rtx_LABEL_REF (VOIDmode, label),
                               pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                        HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                        tree function)
{
  rtx this_rtx, funexp;
  rtx_insn *insn;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
        {
          emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
          emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
        }
      else
        {
          rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

          emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
        }
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
           gen_rtx_PARALLEL (VOIDmode,
             gen_rtvec (3,
                        gen_rtx_CALL (VOIDmode,
                                      funexp, const0_rtx),
                        gen_rtx_USE (VOIDmode, const0_rtx),
                        simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
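/* Illustrative example (an assumption, not part of the original source):
   for a thunk with delta = -8 and vcall_offset = 0, the RTL built above
   amounts to roughly

	addi 3,3,-8
	b <function>

   i.e. adjust the incoming "this" pointer in r3 and tail-jump to the real
   function without touching the return address.  */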
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	contains:
				made by	addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option

*/
29285 /* Hash functions for the hash table. */
29288 rs6000_hash_constant (rtx k
)
29290 enum rtx_code code
= GET_CODE (k
);
29291 machine_mode mode
= GET_MODE (k
);
29292 unsigned result
= (code
<< 3) ^ mode
;
29293 const char *format
;
29296 format
= GET_RTX_FORMAT (code
);
29297 flen
= strlen (format
);
29303 return result
* 1231 + (unsigned) INSN_UID (XEXP (k
, 0));
29305 case CONST_WIDE_INT
:
29308 flen
= CONST_WIDE_INT_NUNITS (k
);
29309 for (i
= 0; i
< flen
; i
++)
29310 result
= result
* 613 + CONST_WIDE_INT_ELT (k
, i
);
29315 if (mode
!= VOIDmode
)
29316 return real_hash (CONST_DOUBLE_REAL_VALUE (k
)) * result
;
29328 for (; fidx
< flen
; fidx
++)
29329 switch (format
[fidx
])
29334 const char *str
= XSTR (k
, fidx
);
29335 len
= strlen (str
);
29336 result
= result
* 613 + len
;
29337 for (i
= 0; i
< len
; i
++)
29338 result
= result
* 613 + (unsigned) str
[i
];
29343 result
= result
* 1231 + rs6000_hash_constant (XEXP (k
, fidx
));
29347 result
= result
* 613 + (unsigned) XINT (k
, fidx
);
29350 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT
))
29351 result
= result
* 613 + (unsigned) XWINT (k
, fidx
);
29355 for (i
= 0; i
< sizeof (HOST_WIDE_INT
) / sizeof (unsigned); i
++)
29356 result
= result
* 613 + (unsigned) (XWINT (k
, fidx
)
29363 gcc_unreachable ();
hashval_t
toc_hasher::hash (toc_hash_struct *thc)
{
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

bool
toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
{
  rtx r1 = h1->key;
  rtx r2 = h2->key;

  if (h1->key_mode != h2->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)

#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif

void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  const char *name = XSTR (x, 0);

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the identifier and not the
     symbol.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
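/* Background note (not from the original source): the prefixes tested by
   VTABLE_NAME_P follow the Itanium C++ ABI mangling, where _ZTV names a
   vtable, _ZTT a VTT, _ZTI a typeinfo object and _ZTC a construction
   vtable; "_vt." is the old g++ v2 vtable prefix.  */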
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     labels.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
         time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
        toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);

      h = ggc_alloc<toc_hash_struct> ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
      if (*found == NULL)
        *found = h;
      else  /* This is indeed a duplicate.
               Set this label equal to that label.  */
        {
          fputs ("\t.set ", file);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
          fprintf (file, "%d,", labelno);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
          fprintf (file, "%d\n", ((*found)->labelno));

#ifdef HAVE_AS_TLS
          if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
              && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
                  || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
            {
              fputs ("\t.set ", file);
              ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
              fprintf (file, "%d,", labelno);
              ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
              fprintf (file, "%d\n", ((*found)->labelno));
            }
#endif
          return;
        }
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);
  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
29523 if (GET_CODE (x
) == CONST_DOUBLE
&&
29524 (GET_MODE (x
) == TFmode
|| GET_MODE (x
) == TDmode
29525 || GET_MODE (x
) == IFmode
|| GET_MODE (x
) == KFmode
))
29529 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29530 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29532 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29536 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29537 fputs (DOUBLE_INT_ASM_OP
, file
);
29539 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29540 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29541 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29542 fprintf (file
, "0x%lx%08lx,0x%lx%08lx\n",
29543 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29544 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff,
29545 k
[WORDS_BIG_ENDIAN
? 2 : 3] & 0xffffffff,
29546 k
[WORDS_BIG_ENDIAN
? 3 : 2] & 0xffffffff);
29551 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29552 fputs ("\t.long ", file
);
29554 fprintf (file
, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29555 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29556 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29557 fprintf (file
, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29558 k
[0] & 0xffffffff, k
[1] & 0xffffffff,
29559 k
[2] & 0xffffffff, k
[3] & 0xffffffff);
29563 else if (GET_CODE (x
) == CONST_DOUBLE
&&
29564 (GET_MODE (x
) == DFmode
|| GET_MODE (x
) == DDmode
))
29568 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29569 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29571 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x
), k
);
29575 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29576 fputs (DOUBLE_INT_ASM_OP
, file
);
29578 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29579 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29580 fprintf (file
, "0x%lx%08lx\n",
29581 k
[WORDS_BIG_ENDIAN
? 0 : 1] & 0xffffffff,
29582 k
[WORDS_BIG_ENDIAN
? 1 : 0] & 0xffffffff);
29587 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29588 fputs ("\t.long ", file
);
29590 fprintf (file
, "\t.tc FD_%lx_%lx[TC],",
29591 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29592 fprintf (file
, "0x%lx,0x%lx\n",
29593 k
[0] & 0xffffffff, k
[1] & 0xffffffff);
29597 else if (GET_CODE (x
) == CONST_DOUBLE
&&
29598 (GET_MODE (x
) == SFmode
|| GET_MODE (x
) == SDmode
))
29602 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x
)))
29603 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29605 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x
), l
);
29609 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29610 fputs (DOUBLE_INT_ASM_OP
, file
);
29612 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29613 if (WORDS_BIG_ENDIAN
)
29614 fprintf (file
, "0x%lx00000000\n", l
& 0xffffffff);
29616 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
29621 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29622 fputs ("\t.long ", file
);
29624 fprintf (file
, "\t.tc FS_%lx[TC],", l
& 0xffffffff);
29625 fprintf (file
, "0x%lx\n", l
& 0xffffffff);
29629 else if (GET_MODE (x
) == VOIDmode
&& GET_CODE (x
) == CONST_INT
)
29631 unsigned HOST_WIDE_INT low
;
29632 HOST_WIDE_INT high
;
29634 low
= INTVAL (x
) & 0xffffffff;
29635 high
= (HOST_WIDE_INT
) INTVAL (x
) >> 32;
29637 /* TOC entries are always Pmode-sized, so when big-endian
29638 smaller integer constants in the TOC need to be padded.
29639 (This is still a win over putting the constants in
29640 a separate constant pool, because then we'd have
29641 to have both a TOC entry _and_ the actual constant.)
29643 For a 32-bit target, CONST_INT values are loaded and shifted
29644 entirely within `low' and can be stored in one TOC entry. */
29646 /* It would be easy to make this work, but it doesn't now. */
29647 gcc_assert (!TARGET_64BIT
|| POINTER_SIZE
>= GET_MODE_BITSIZE (mode
));
29649 if (WORDS_BIG_ENDIAN
&& POINTER_SIZE
> GET_MODE_BITSIZE (mode
))
29652 low
<<= POINTER_SIZE
- GET_MODE_BITSIZE (mode
);
29653 high
= (HOST_WIDE_INT
) low
>> 32;
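	  /* Worked example (illustrative): for an SImode constant
	     0x12345678 in a 64-bit big-endian TOC, the 32-bit value is
	     shifted into the high half of the Pmode word, so the entry
	     is emitted as high = 0x12345678, low = 0x00000000.  */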
29659 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29660 fputs (DOUBLE_INT_ASM_OP
, file
);
29662 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
29663 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29664 fprintf (file
, "0x%lx%08lx\n",
29665 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29670 if (POINTER_SIZE
< GET_MODE_BITSIZE (mode
))
29672 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29673 fputs ("\t.long ", file
);
29675 fprintf (file
, "\t.tc ID_%lx_%lx[TC],",
29676 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29677 fprintf (file
, "0x%lx,0x%lx\n",
29678 (long) high
& 0xffffffff, (long) low
& 0xffffffff);
29682 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29683 fputs ("\t.long ", file
);
29685 fprintf (file
, "\t.tc IS_%lx[TC],", (long) low
& 0xffffffff);
29686 fprintf (file
, "0x%lx\n", (long) low
& 0xffffffff);
29692 if (GET_CODE (x
) == CONST
)
29694 gcc_assert (GET_CODE (XEXP (x
, 0)) == PLUS
29695 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
);
29697 base
= XEXP (XEXP (x
, 0), 0);
29698 offset
= INTVAL (XEXP (XEXP (x
, 0), 1));
29701 switch (GET_CODE (base
))
29704 name
= XSTR (base
, 0);
29708 ASM_GENERATE_INTERNAL_LABEL (buf
, "L",
29709 CODE_LABEL_NUMBER (XEXP (base
, 0)));
29713 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (base
));
29717 gcc_unreachable ();
29720 if (TARGET_ELF
|| TARGET_MINIMAL_TOC
)
29721 fputs (TARGET_32BIT
? "\t.long " : DOUBLE_INT_ASM_OP
, file
);
29724 fputs ("\t.tc ", file
);
29725 RS6000_OUTPUT_BASENAME (file
, name
);
29728 fprintf (file
, ".N" HOST_WIDE_INT_PRINT_UNSIGNED
, - offset
);
29730 fprintf (file
, ".P" HOST_WIDE_INT_PRINT_UNSIGNED
, offset
);
29732 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29733 after other TOC symbols, reducing overflow of small TOC access
29734 to [TC] symbols. */
29735 fputs (TARGET_XCOFF
&& TARGET_CMODEL
!= CMODEL_SMALL
29736 ? "[TE]," : "[TC],", file
);
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
29745 if (VTABLE_NAME_P (name
))
29747 RS6000_OUTPUT_BASENAME (file
, name
);
29749 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, offset
);
29750 else if (offset
> 0)
29751 fprintf (file
, "+" HOST_WIDE_INT_PRINT_DEC
, offset
);
29754 output_addr_const (file
, x
);
29757 if (TARGET_XCOFF
&& GET_CODE (base
) == SYMBOL_REF
)
29759 switch (SYMBOL_REF_TLS_MODEL (base
))
29763 case TLS_MODEL_LOCAL_EXEC
:
29764 fputs ("@le", file
);
29766 case TLS_MODEL_INITIAL_EXEC
:
29767 fputs ("@ie", file
);
29769 /* Use global-dynamic for local-dynamic. */
29770 case TLS_MODEL_GLOBAL_DYNAMIC
:
29771 case TLS_MODEL_LOCAL_DYNAMIC
:
29773 (*targetm
.asm_out
.internal_label
) (file
, "LCM", labelno
);
29774 fputs ("\t.tc .", file
);
29775 RS6000_OUTPUT_BASENAME (file
, name
);
29776 fputs ("[TC],", file
);
29777 output_addr_const (file
, x
);
29778 fputs ("@m", file
);
29781 gcc_unreachable ();
29789 /* Output an assembler pseudo-op to write an ASCII string of N characters
29790 starting at P to FILE.
29792 On the RS/6000, we have to do this using the .byte operation and
29793 write out special characters outside the quoted string.
29794 Also, the assembler is broken; very long strings are truncated,
29795 so we must artificially break them up early. */
29798 output_ascii (FILE *file
, const char *p
, int n
)
29801 int i
, count_string
;
29802 const char *for_string
= "\t.byte \"";
29803 const char *for_decimal
= "\t.byte ";
29804 const char *to_close
= NULL
;
29807 for (i
= 0; i
< n
; i
++)
29810 if (c
>= ' ' && c
< 0177)
29813 fputs (for_string
, file
);
29816 /* Write two quotes to get one. */
29824 for_decimal
= "\"\n\t.byte ";
29828 if (count_string
>= 512)
29830 fputs (to_close
, file
);
29832 for_string
= "\t.byte \"";
29833 for_decimal
= "\t.byte ";
29841 fputs (for_decimal
, file
);
29842 fprintf (file
, "%d", c
);
29844 for_string
= "\n\t.byte \"";
29845 for_decimal
= ", ";
29851 /* Now close the string if we have written one. Then end the line. */
29853 fputs (to_close
, file
);
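/* Illustrative example (assuming the usual three-byte input "Hi\n"):
   printable characters are accumulated inside a quoted .byte directive
   and non-printable ones are emitted as decimal values, so the bytes
   would come out roughly as

	.byte "Hi"
	.byte 10
*/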
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   FILENAME.  */
29869 rs6000_gen_section_name (char **buf
, const char *filename
,
29870 const char *section_desc
)
29872 const char *q
, *after_last_slash
, *last_period
= 0;
29876 after_last_slash
= filename
;
29877 for (q
= filename
; *q
; q
++)
29880 after_last_slash
= q
+ 1;
29881 else if (*q
== '.')
29885 len
= strlen (after_last_slash
) + strlen (section_desc
) + 2;
29886 *buf
= (char *) xmalloc (len
);
29891 for (q
= after_last_slash
; *q
; q
++)
29893 if (q
== last_period
)
29895 strcpy (p
, section_desc
);
29896 p
+= strlen (section_desc
);
29900 else if (ISALNUM (*q
))
29904 if (last_period
== 0)
29905 strcpy (p
, section_desc
);
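/* Worked example (illustrative): with FILENAME "subdir/foo.c" and
   SECTION_DESC "bss_", the directory prefix is dropped, the last period
   is replaced by SECTION_DESC, and a leading underscore is prepended
   (per the comment above), giving the section name "_foobss_".  */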
29910 /* Emit profile function. */
29913 output_profile_hook (int labelno ATTRIBUTE_UNUSED
)
29915 /* Non-standard profiling for kernels, which just saves LR then calls
29916 _mcount without worrying about arg saves. The idea is to change
29917 the function prologue as little as possible as it isn't easy to
29918 account for arg save/restore code added just for _mcount. */
29919 if (TARGET_PROFILE_KERNEL
)
29922 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
29924 #ifndef NO_PROFILE_COUNTERS
29925 # define NO_PROFILE_COUNTERS 0
29927 if (NO_PROFILE_COUNTERS
)
29928 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
29929 LCT_NORMAL
, VOIDmode
, 0);
29933 const char *label_name
;
29936 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
29937 label_name
= ggc_strdup ((*targetm
.strip_name_encoding
) (buf
));
29938 fun
= gen_rtx_SYMBOL_REF (Pmode
, label_name
);
29940 emit_library_call (init_one_libfunc (RS6000_MCOUNT
),
29941 LCT_NORMAL
, VOIDmode
, 1, fun
, Pmode
);
29944 else if (DEFAULT_ABI
== ABI_DARWIN
)
29946 const char *mcount_name
= RS6000_MCOUNT
;
29947 int caller_addr_regno
= LR_REGNO
;
29949 /* Be conservative and always set this, at least for now. */
29950 crtl
->uses_pic_offset_table
= 1;
29953 /* For PIC code, set up a stub and collect the caller's address
29954 from r0, which is where the prologue puts it. */
29955 if (MACHOPIC_INDIRECT
29956 && crtl
->uses_pic_offset_table
)
29957 caller_addr_regno
= 0;
29959 emit_library_call (gen_rtx_SYMBOL_REF (Pmode
, mcount_name
),
29960 LCT_NORMAL
, VOIDmode
, 1,
29961 gen_rtx_REG (Pmode
, caller_addr_regno
), Pmode
);
29965 /* Write function profiler code. */
29968 output_function_profiler (FILE *file
, int labelno
)
29972 switch (DEFAULT_ABI
)
29975 gcc_unreachable ();
29980 warning (0, "no profiling of 64-bit code for this ABI");
29983 ASM_GENERATE_INTERNAL_LABEL (buf
, "LP", labelno
);
29984 fprintf (file
, "\tmflr %s\n", reg_names
[0]);
29985 if (NO_PROFILE_COUNTERS
)
29987 asm_fprintf (file
, "\tstw %s,4(%s)\n",
29988 reg_names
[0], reg_names
[1]);
29990 else if (TARGET_SECURE_PLT
&& flag_pic
)
29992 if (TARGET_LINK_STACK
)
29995 get_ppc476_thunk_name (name
);
29996 asm_fprintf (file
, "\tbl %s\n", name
);
29999 asm_fprintf (file
, "\tbcl 20,31,1f\n1:\n");
30000 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30001 reg_names
[0], reg_names
[1]);
30002 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30003 asm_fprintf (file
, "\taddis %s,%s,",
30004 reg_names
[12], reg_names
[12]);
30005 assemble_name (file
, buf
);
30006 asm_fprintf (file
, "-1b@ha\n\tla %s,", reg_names
[0]);
30007 assemble_name (file
, buf
);
30008 asm_fprintf (file
, "-1b@l(%s)\n", reg_names
[12]);
30010 else if (flag_pic
== 1)
30012 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file
);
30013 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30014 reg_names
[0], reg_names
[1]);
30015 asm_fprintf (file
, "\tmflr %s\n", reg_names
[12]);
30016 asm_fprintf (file
, "\tlwz %s,", reg_names
[0]);
30017 assemble_name (file
, buf
);
30018 asm_fprintf (file
, "@got(%s)\n", reg_names
[12]);
30020 else if (flag_pic
> 1)
30022 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30023 reg_names
[0], reg_names
[1]);
30024 /* Now, we need to get the address of the label. */
30025 if (TARGET_LINK_STACK
)
30028 get_ppc476_thunk_name (name
);
30029 asm_fprintf (file
, "\tbl %s\n\tb 1f\n\t.long ", name
);
30030 assemble_name (file
, buf
);
30031 fputs ("-.\n1:", file
);
30032 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30033 asm_fprintf (file
, "\taddi %s,%s,4\n",
30034 reg_names
[11], reg_names
[11]);
30038 fputs ("\tbcl 20,31,1f\n\t.long ", file
);
30039 assemble_name (file
, buf
);
30040 fputs ("-.\n1:", file
);
30041 asm_fprintf (file
, "\tmflr %s\n", reg_names
[11]);
30043 asm_fprintf (file
, "\tlwz %s,0(%s)\n",
30044 reg_names
[0], reg_names
[11]);
30045 asm_fprintf (file
, "\tadd %s,%s,%s\n",
30046 reg_names
[0], reg_names
[0], reg_names
[11]);
30050 asm_fprintf (file
, "\tlis %s,", reg_names
[12]);
30051 assemble_name (file
, buf
);
30052 fputs ("@ha\n", file
);
30053 asm_fprintf (file
, "\tstw %s,4(%s)\n",
30054 reg_names
[0], reg_names
[1]);
30055 asm_fprintf (file
, "\tla %s,", reg_names
[0]);
30056 assemble_name (file
, buf
);
30057 asm_fprintf (file
, "@l(%s)\n", reg_names
[12]);
30060 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30061 fprintf (file
, "\tbl %s%s\n",
30062 RS6000_MCOUNT
, flag_pic
? "@plt" : "");
30068 /* Don't do anything, done in output_profile_hook (). */
30075 /* The following variable value is the last issued insn. */
30077 static rtx_insn
*last_scheduled_insn
;
30079 /* The following variable helps to balance issuing of load and
30080 store instructions */
30082 static int load_store_pendulum
;
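/* Illustrative sketch (not part of the scheduler): the pendulum is just a
   signed counter, decremented for each store issued in the current cycle
   and incremented for each load, so rs6000_sched_reorder2 can tell states
   such as "one load issued" (+1) or "two stores issued" (-2) apart.  A
   minimal model of that bookkeeping:  */
#if 0
static int
update_pendulum (int pendulum, bool is_store, bool is_load)
{
  if (is_store)
    pendulum--;		/* swing to the left for stores */
  else if (is_load)
    pendulum++;		/* swing to the right for loads */
  return pendulum;
}
#endif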
30084 /* The following variable helps pair divide insns during scheduling. */
30085 static int divide_cnt
;
30086 /* The following variable helps pair and alternate vector and vector load
30087 insns during scheduling. */
30088 static int vec_pairing
;
30091 /* Power4 load update and store update instructions are cracked into a
30092 load or store and an integer insn which are executed in the same cycle.
30093 Branches have their own dispatch slot which does not count against the
30094 GCC issue rate, but it changes the program flow so there are no other
30095 instructions to issue in this cycle. */
30098 rs6000_variable_issue_1 (rtx_insn
*insn
, int more
)
30100 last_scheduled_insn
= insn
;
30101 if (GET_CODE (PATTERN (insn
)) == USE
30102 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30104 cached_can_issue_more
= more
;
30105 return cached_can_issue_more
;
30108 if (insn_terminates_group_p (insn
, current_group
))
30110 cached_can_issue_more
= 0;
30111 return cached_can_issue_more
;
30114 /* If no reservation, but reach here */
30115 if (recog_memoized (insn
) < 0)
30118 if (rs6000_sched_groups
)
30120 if (is_microcoded_insn (insn
))
30121 cached_can_issue_more
= 0;
30122 else if (is_cracked_insn (insn
))
30123 cached_can_issue_more
= more
> 2 ? more
- 2 : 0;
30125 cached_can_issue_more
= more
- 1;
30127 return cached_can_issue_more
;
30130 if (rs6000_cpu_attr
== CPU_CELL
&& is_nonpipeline_insn (insn
))
30133 cached_can_issue_more
= more
- 1;
30134 return cached_can_issue_more
;
30138 rs6000_variable_issue (FILE *stream
, int verbose
, rtx_insn
*insn
, int more
)
30140 int r
= rs6000_variable_issue_1 (insn
, more
);
30142 fprintf (stream
, "// rs6000_variable_issue (more = %d) = %d\n", more
, r
);
30146 /* Adjust the cost of a scheduling dependency. Return the new cost of
30147 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30150 rs6000_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
, int cost
,
30153 enum attr_type attr_type
;
30155 if (recog_memoized (insn
) < 0 || recog_memoized (dep_insn
) < 0)
30162 /* Data dependency; DEP_INSN writes a register that INSN reads
30163 some cycles later. */
30165 /* Separate a load from a narrower, dependent store. */
30166 if ((rs6000_sched_groups
|| rs6000_cpu_attr
== CPU_POWER9
)
30167 && GET_CODE (PATTERN (insn
)) == SET
30168 && GET_CODE (PATTERN (dep_insn
)) == SET
30169 && GET_CODE (XEXP (PATTERN (insn
), 1)) == MEM
30170 && GET_CODE (XEXP (PATTERN (dep_insn
), 0)) == MEM
30171 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn
), 1)))
30172 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn
), 0)))))
30175 attr_type
= get_attr_type (insn
);
30180 /* Tell the first scheduling pass about the latency between
30181 a mtctr and bctr (and mtlr and br/blr). The first
30182 scheduling pass will not know about this latency since
30183 the mtctr instruction, which has the latency associated
30184 to it, will be generated by reload. */
30187 /* Leave some extra cycles between a compare and its
30188 dependent branch, to inhibit expensive mispredicts. */
30189 if ((rs6000_cpu_attr
== CPU_PPC603
30190 || rs6000_cpu_attr
== CPU_PPC604
30191 || rs6000_cpu_attr
== CPU_PPC604E
30192 || rs6000_cpu_attr
== CPU_PPC620
30193 || rs6000_cpu_attr
== CPU_PPC630
30194 || rs6000_cpu_attr
== CPU_PPC750
30195 || rs6000_cpu_attr
== CPU_PPC7400
30196 || rs6000_cpu_attr
== CPU_PPC7450
30197 || rs6000_cpu_attr
== CPU_PPCE5500
30198 || rs6000_cpu_attr
== CPU_PPCE6500
30199 || rs6000_cpu_attr
== CPU_POWER4
30200 || rs6000_cpu_attr
== CPU_POWER5
30201 || rs6000_cpu_attr
== CPU_POWER7
30202 || rs6000_cpu_attr
== CPU_POWER8
30203 || rs6000_cpu_attr
== CPU_POWER9
30204 || rs6000_cpu_attr
== CPU_CELL
)
30205 && recog_memoized (dep_insn
)
30206 && (INSN_CODE (dep_insn
) >= 0))
30208 switch (get_attr_type (dep_insn
))
30211 case TYPE_FPCOMPARE
:
30212 case TYPE_CR_LOGICAL
:
30213 case TYPE_DELAYED_CR
:
30217 if (get_attr_dot (dep_insn
) == DOT_YES
)
30222 if (get_attr_dot (dep_insn
) == DOT_YES
30223 && get_attr_var_shift (dep_insn
) == VAR_SHIFT_NO
)
30234 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30235 && recog_memoized (dep_insn
)
30236 && (INSN_CODE (dep_insn
) >= 0))
30239 if (GET_CODE (PATTERN (insn
)) != SET
)
30240 /* If this happens, we have to extend this to schedule
30241 optimally. Return default for now. */
30244 /* Adjust the cost for the case where the value written
30245 by a fixed point operation is used as the address
30246 gen value on a store. */
30247 switch (get_attr_type (dep_insn
))
30252 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30253 return get_attr_sign_extend (dep_insn
)
30254 == SIGN_EXTEND_YES
? 6 : 4;
30259 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30260 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30270 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30278 if (get_attr_update (dep_insn
) == UPDATE_YES
30279 && ! rs6000_store_data_bypass_p (dep_insn
, insn
))
30285 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30291 if (! rs6000_store_data_bypass_p (dep_insn
, insn
))
30292 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30302 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30303 && recog_memoized (dep_insn
)
30304 && (INSN_CODE (dep_insn
) >= 0))
30307 /* Adjust the cost for the case where the value written
30308 by a fixed point instruction is used within the address
30309 gen portion of a subsequent load(u)(x) */
30310 switch (get_attr_type (dep_insn
))
30315 if (set_to_load_agen (dep_insn
, insn
))
30316 return get_attr_sign_extend (dep_insn
)
30317 == SIGN_EXTEND_YES
? 6 : 4;
30322 if (set_to_load_agen (dep_insn
, insn
))
30323 return get_attr_var_shift (dep_insn
) == VAR_SHIFT_YES
?
30333 if (set_to_load_agen (dep_insn
, insn
))
30341 if (get_attr_update (dep_insn
) == UPDATE_YES
30342 && set_to_load_agen (dep_insn
, insn
))
30348 if (set_to_load_agen (dep_insn
, insn
))
30354 if (set_to_load_agen (dep_insn
, insn
))
30355 return get_attr_size (dep_insn
) == SIZE_32
? 45 : 57;
30365 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30366 && get_attr_update (insn
) == UPDATE_NO
30367 && recog_memoized (dep_insn
)
30368 && (INSN_CODE (dep_insn
) >= 0)
30369 && (get_attr_type (dep_insn
) == TYPE_MFFGPR
))
30376 /* Fall out to return default cost. */
30380 case REG_DEP_OUTPUT
:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
30383 if ((rs6000_cpu
== PROCESSOR_POWER6
)
30384 && recog_memoized (dep_insn
)
30385 && (INSN_CODE (dep_insn
) >= 0))
30387 attr_type
= get_attr_type (insn
);
30392 case TYPE_FPSIMPLE
:
30393 if (get_attr_type (dep_insn
) == TYPE_FP
30394 || get_attr_type (dep_insn
) == TYPE_FPSIMPLE
)
30398 if (get_attr_update (insn
) == UPDATE_NO
30399 && get_attr_type (dep_insn
) == TYPE_MFFGPR
)
30406 /* Fall through, no cost for output dependency. */
    /* Anti dependency; DEP_INSN reads a register that INSN writes some
       cycles later.  */
30415 gcc_unreachable ();
30421 /* Debug version of rs6000_adjust_cost. */
30424 rs6000_debug_adjust_cost (rtx_insn
*insn
, int dep_type
, rtx_insn
*dep_insn
,
30425 int cost
, unsigned int dw
)
30427 int ret
= rs6000_adjust_cost (insn
, dep_type
, dep_insn
, cost
, dw
);
      default:             dep = "unknown dependency"; break;
      case REG_DEP_TRUE:   dep = "data dependency";    break;
      case REG_DEP_OUTPUT: dep = "output dependency";  break;
      case REG_DEP_ANTI:   dep = "anti dependency";    break;
30442 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30443 "%s, insn:\n", ret
, cost
, dep
);
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */
30455 is_microcoded_insn (rtx_insn
*insn
)
30457 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30458 || GET_CODE (PATTERN (insn
)) == USE
30459 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30462 if (rs6000_cpu_attr
== CPU_CELL
)
30463 return get_attr_cell_micro (insn
) == CELL_MICRO_ALWAYS
;
30465 if (rs6000_sched_groups
30466 && (rs6000_cpu
== PROCESSOR_POWER4
|| rs6000_cpu
== PROCESSOR_POWER5
))
30468 enum attr_type type
= get_attr_type (insn
);
30469 if ((type
== TYPE_LOAD
30470 && get_attr_update (insn
) == UPDATE_YES
30471 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
)
30472 || ((type
== TYPE_LOAD
|| type
== TYPE_STORE
)
30473 && get_attr_update (insn
) == UPDATE_YES
30474 && get_attr_indexed (insn
) == INDEXED_YES
)
30475 || type
== TYPE_MFCR
)
30482 /* The function returns true if INSN is cracked into 2 instructions
30483 by the processor (and therefore occupies 2 issue slots). */
30486 is_cracked_insn (rtx_insn
*insn
)
30488 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30489 || GET_CODE (PATTERN (insn
)) == USE
30490 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30493 if (rs6000_sched_groups
30494 && (rs6000_cpu
== PROCESSOR_POWER4
|| rs6000_cpu
== PROCESSOR_POWER5
))
30496 enum attr_type type
= get_attr_type (insn
);
30497 if ((type
== TYPE_LOAD
30498 && get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
30499 && get_attr_update (insn
) == UPDATE_NO
)
30500 || (type
== TYPE_LOAD
30501 && get_attr_sign_extend (insn
) == SIGN_EXTEND_NO
30502 && get_attr_update (insn
) == UPDATE_YES
30503 && get_attr_indexed (insn
) == INDEXED_NO
)
30504 || (type
== TYPE_STORE
30505 && get_attr_update (insn
) == UPDATE_YES
30506 && get_attr_indexed (insn
) == INDEXED_NO
)
30507 || ((type
== TYPE_FPLOAD
|| type
== TYPE_FPSTORE
)
30508 && get_attr_update (insn
) == UPDATE_YES
)
30509 || type
== TYPE_DELAYED_CR
30510 || (type
== TYPE_EXTS
30511 && get_attr_dot (insn
) == DOT_YES
)
30512 || (type
== TYPE_SHIFT
30513 && get_attr_dot (insn
) == DOT_YES
30514 && get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
30515 || (type
== TYPE_MUL
30516 && get_attr_dot (insn
) == DOT_YES
)
30517 || type
== TYPE_DIV
30518 || (type
== TYPE_INSERT
30519 && get_attr_size (insn
) == SIZE_32
))
30526 /* The function returns true if INSN can be issued only from
30527 the branch slot. */
30530 is_branch_slot_insn (rtx_insn
*insn
)
30532 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30533 || GET_CODE (PATTERN (insn
)) == USE
30534 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30537 if (rs6000_sched_groups
)
30539 enum attr_type type
= get_attr_type (insn
);
30540 if (type
== TYPE_BRANCH
|| type
== TYPE_JMPREG
)
/* The function returns true if out_insn sets a value that is
   used in the address generation computation of in_insn.  */
30551 set_to_load_agen (rtx_insn
*out_insn
, rtx_insn
*in_insn
)
30553 rtx out_set
, in_set
;
30555 /* For performance reasons, only handle the simple case where
30556 both loads are a single_set. */
30557 out_set
= single_set (out_insn
);
30560 in_set
= single_set (in_insn
);
30562 return reg_mentioned_p (SET_DEST (out_set
), SET_SRC (in_set
));
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */
30576 get_memref_parts (rtx mem
, rtx
*base
, HOST_WIDE_INT
*offset
,
30577 HOST_WIDE_INT
*size
)
30580 if MEM_SIZE_KNOWN_P (mem
)
30581 *size
= MEM_SIZE (mem
);
30585 addr_rtx
= (XEXP (mem
, 0));
30586 if (GET_CODE (addr_rtx
) == PRE_MODIFY
)
30587 addr_rtx
= XEXP (addr_rtx
, 1);
30590 while (GET_CODE (addr_rtx
) == PLUS
30591 && CONST_INT_P (XEXP (addr_rtx
, 1)))
30593 *offset
+= INTVAL (XEXP (addr_rtx
, 1));
30594 addr_rtx
= XEXP (addr_rtx
, 0);
30596 if (!REG_P (addr_rtx
))
30603 /* The function returns true if the target storage location of
30604 mem1 is adjacent to the target storage location of mem2 */
30605 /* Return 1 if memory locations are adjacent. */
30608 adjacent_mem_locations (rtx mem1
, rtx mem2
)
30611 HOST_WIDE_INT off1
, size1
, off2
, size2
;
30613 if (get_memref_parts (mem1
, ®1
, &off1
, &size1
)
30614 && get_memref_parts (mem2
, ®2
, &off2
, &size2
))
30615 return ((REGNO (reg1
) == REGNO (reg2
))
30616 && ((off1
+ size1
== off2
)
30617 || (off2
+ size2
== off1
)));
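/* Worked example (illustrative): two stores to 8(r9) and 16(r9) of size 8
   each give off1 = 8, size1 = 8, off2 = 16, so off1 + size1 == off2 and
   the locations are adjacent; the same bases with off2 = 12 would instead
   overlap, which is what mem_locations_overlap below checks.  */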
30622 /* This function returns true if it can be determined that the two MEM
30623 locations overlap by at least 1 byte based on base reg/offset/size. */
30626 mem_locations_overlap (rtx mem1
, rtx mem2
)
30629 HOST_WIDE_INT off1
, size1
, off2
, size2
;
30631 if (get_memref_parts (mem1
, ®1
, &off1
, &size1
)
30632 && get_memref_parts (mem2
, ®2
, &off2
, &size2
))
30633 return ((REGNO (reg1
) == REGNO (reg2
))
30634 && (((off1
<= off2
) && (off1
+ size1
> off2
))
30635 || ((off2
<= off1
) && (off2
+ size2
> off1
))));
30640 /* A C statement (sans semicolon) to update the integer scheduling
30641 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30642 INSN earlier, reduce the priority to execute INSN later. Do not
30643 define this macro if you do not need to adjust the scheduling
30644 priorities of insns. */
30647 rs6000_adjust_priority (rtx_insn
*insn ATTRIBUTE_UNUSED
, int priority
)
30649 rtx load_mem
, str_mem
;
30650 /* On machines (like the 750) which have asymmetric integer units,
30651 where one integer unit can do multiply and divides and the other
30652 can't, reduce the priority of multiply/divide so it is scheduled
30653 before other integer operations. */
30656 if (! INSN_P (insn
))
30659 if (GET_CODE (PATTERN (insn
)) == USE
)
30662 switch (rs6000_cpu_attr
) {
30664 switch (get_attr_type (insn
))
30671 fprintf (stderr
, "priority was %#x (%d) before adjustment\n",
30672 priority
, priority
);
30673 if (priority
>= 0 && priority
< 0x01000000)
30680 if (insn_must_be_first_in_group (insn
)
30681 && reload_completed
30682 && current_sched_info
->sched_max_insns_priority
30683 && rs6000_sched_restricted_insns_priority
)
30686 /* Prioritize insns that can be dispatched only in the first
30688 if (rs6000_sched_restricted_insns_priority
== 1)
30689 /* Attach highest priority to insn. This means that in
30690 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30691 precede 'priority' (critical path) considerations. */
30692 return current_sched_info
->sched_max_insns_priority
;
30693 else if (rs6000_sched_restricted_insns_priority
== 2)
30694 /* Increase priority of insn by a minimal amount. This means that in
30695 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30696 considerations precede dispatch-slot restriction considerations. */
30697 return (priority
+ 1);
30700 if (rs6000_cpu
== PROCESSOR_POWER6
30701 && ((load_store_pendulum
== -2 && is_load_insn (insn
, &load_mem
))
30702 || (load_store_pendulum
== 2 && is_store_insn (insn
, &str_mem
))))
30703 /* Attach highest priority to insn if the scheduler has just issued two
30704 stores and this instruction is a load, or two loads and this instruction
30705 is a store. Power6 wants loads and stores scheduled alternately
30707 return current_sched_info
->sched_max_insns_priority
;
30712 /* Return true if the instruction is nonpipelined on the Cell. */
30714 is_nonpipeline_insn (rtx_insn
*insn
)
30716 enum attr_type type
;
30717 if (!insn
|| !NONDEBUG_INSN_P (insn
)
30718 || GET_CODE (PATTERN (insn
)) == USE
30719 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
30722 type
= get_attr_type (insn
);
30723 if (type
== TYPE_MUL
30724 || type
== TYPE_DIV
30725 || type
== TYPE_SDIV
30726 || type
== TYPE_DDIV
30727 || type
== TYPE_SSQRT
30728 || type
== TYPE_DSQRT
30729 || type
== TYPE_MFCR
30730 || type
== TYPE_MFCRF
30731 || type
== TYPE_MFJMPR
)
30739 /* Return how many instructions the machine can issue per cycle. */
30742 rs6000_issue_rate (void)
30744 /* Unless scheduling for register pressure, use issue rate of 1 for
30745 first scheduling pass to decrease degradation. */
30746 if (!reload_completed
&& !flag_sched_pressure
)
30749 switch (rs6000_cpu_attr
) {
30751 case CPU_PPC601
: /* ? */
30761 case CPU_PPCE300C2
:
30762 case CPU_PPCE300C3
:
30763 case CPU_PPCE500MC
:
30764 case CPU_PPCE500MC64
:
30789 /* Return how many instructions to look ahead for better insn
30793 rs6000_use_sched_lookahead (void)
30795 switch (rs6000_cpu_attr
)
30802 return (reload_completed
? 8 : 0);
30809 /* We are choosing insn from the ready queue. Return zero if INSN can be
30812 rs6000_use_sched_lookahead_guard (rtx_insn
*insn
, int ready_index
)
30814 if (ready_index
== 0)
30817 if (rs6000_cpu_attr
!= CPU_CELL
)
30820 gcc_assert (insn
!= NULL_RTX
&& INSN_P (insn
));
30822 if (!reload_completed
30823 || is_nonpipeline_insn (insn
)
30824 || is_microcoded_insn (insn
))
30830 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30831 and return true. */
30834 find_mem_ref (rtx pat
, rtx
*mem_ref
)
30839 /* stack_tie does not produce any real memory traffic. */
30840 if (tie_operand (pat
, VOIDmode
))
30843 if (GET_CODE (pat
) == MEM
)
30849 /* Recursively process the pattern. */
30850 fmt
= GET_RTX_FORMAT (GET_CODE (pat
));
30852 for (i
= GET_RTX_LENGTH (GET_CODE (pat
)) - 1; i
>= 0; i
--)
30856 if (find_mem_ref (XEXP (pat
, i
), mem_ref
))
30859 else if (fmt
[i
] == 'E')
30860 for (j
= XVECLEN (pat
, i
) - 1; j
>= 0; j
--)
30862 if (find_mem_ref (XVECEXP (pat
, i
, j
), mem_ref
))
30870 /* Determine if PAT is a PATTERN of a load insn. */
30873 is_load_insn1 (rtx pat
, rtx
*load_mem
)
30875 if (!pat
|| pat
== NULL_RTX
)
30878 if (GET_CODE (pat
) == SET
)
30879 return find_mem_ref (SET_SRC (pat
), load_mem
);
30881 if (GET_CODE (pat
) == PARALLEL
)
30885 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
30886 if (is_load_insn1 (XVECEXP (pat
, 0, i
), load_mem
))
30893 /* Determine if INSN loads from memory. */
30896 is_load_insn (rtx insn
, rtx
*load_mem
)
30898 if (!insn
|| !INSN_P (insn
))
30904 return is_load_insn1 (PATTERN (insn
), load_mem
);
30907 /* Determine if PAT is a PATTERN of a store insn. */
30910 is_store_insn1 (rtx pat
, rtx
*str_mem
)
30912 if (!pat
|| pat
== NULL_RTX
)
30915 if (GET_CODE (pat
) == SET
)
30916 return find_mem_ref (SET_DEST (pat
), str_mem
);
30918 if (GET_CODE (pat
) == PARALLEL
)
30922 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
30923 if (is_store_insn1 (XVECEXP (pat
, 0, i
), str_mem
))
30930 /* Determine if INSN stores to memory. */
30933 is_store_insn (rtx insn
, rtx
*str_mem
)
30935 if (!insn
|| !INSN_P (insn
))
30938 return is_store_insn1 (PATTERN (insn
), str_mem
);
30941 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30944 is_power9_pairable_vec_type (enum attr_type type
)
30948 case TYPE_VECSIMPLE
:
30949 case TYPE_VECCOMPLEX
:
30953 case TYPE_VECFLOAT
:
30955 case TYPE_VECDOUBLE
:
30963 /* Returns whether the dependence between INSN and NEXT is considered
30964 costly by the given target. */
30967 rs6000_is_costly_dependence (dep_t dep
, int cost
, int distance
)
30971 rtx load_mem
, str_mem
;
30973 /* If the flag is not enabled - no dependence is considered costly;
30974 allow all dependent insns in the same group.
30975 This is the most aggressive option. */
30976 if (rs6000_sched_costly_dep
== no_dep_costly
)
30979 /* If the flag is set to 1 - a dependence is always considered costly;
30980 do not allow dependent instructions in the same group.
30981 This is the most conservative option. */
30982 if (rs6000_sched_costly_dep
== all_deps_costly
)
30985 insn
= DEP_PRO (dep
);
30986 next
= DEP_CON (dep
);
30988 if (rs6000_sched_costly_dep
== store_to_load_dep_costly
30989 && is_load_insn (next
, &load_mem
)
30990 && is_store_insn (insn
, &str_mem
))
30991 /* Prevent load after store in the same group. */
30994 if (rs6000_sched_costly_dep
== true_store_to_load_dep_costly
30995 && is_load_insn (next
, &load_mem
)
30996 && is_store_insn (insn
, &str_mem
)
30997 && DEP_TYPE (dep
) == REG_DEP_TRUE
30998 && mem_locations_overlap(str_mem
, load_mem
))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
31003 /* The flag is set to X; dependences with latency >= X are considered costly,
31004 and will not be scheduled in the same group. */
31005 if (rs6000_sched_costly_dep
<= max_dep_latency
31006 && ((cost
- distance
) >= (int)rs6000_sched_costly_dep
))
31012 /* Return the next insn after INSN that is found before TAIL is reached,
31013 skipping any "non-active" insns - insns that will not actually occupy
31014 an issue slot. Return NULL_RTX if such an insn is not found. */
31017 get_next_active_insn (rtx_insn
*insn
, rtx_insn
*tail
)
31019 if (insn
== NULL_RTX
|| insn
== tail
)
31024 insn
= NEXT_INSN (insn
);
31025 if (insn
== NULL_RTX
|| insn
== tail
)
31029 || JUMP_P (insn
) || JUMP_TABLE_DATA_P (insn
)
31030 || (NONJUMP_INSN_P (insn
)
31031 && GET_CODE (PATTERN (insn
)) != USE
31032 && GET_CODE (PATTERN (insn
)) != CLOBBER
31033 && INSN_CODE (insn
) != CODE_FOR_stack_tie
))
31039 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31042 power9_sched_reorder2 (rtx_insn
**ready
, int lastpos
)
31047 enum attr_type type
, type2
;
31049 type
= get_attr_type (last_scheduled_insn
);
31051 /* Try to issue fixed point divides back-to-back in pairs so they will be
31052 routed to separate execution units and execute in parallel. */
31053 if (type
== TYPE_DIV
&& divide_cnt
== 0)
31055 /* First divide has been scheduled. */
31058 /* Scan the ready list looking for another divide, if found move it
31059 to the end of the list so it is chosen next. */
31063 if (recog_memoized (ready
[pos
]) >= 0
31064 && get_attr_type (ready
[pos
]) == TYPE_DIV
)
31067 for (i
= pos
; i
< lastpos
; i
++)
31068 ready
[i
] = ready
[i
+ 1];
31069 ready
[lastpos
] = tmp
;
31077 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31080 /* The best dispatch throughput for vector and vector load insns can be
31081 achieved by interleaving a vector and vector load such that they'll
31082 dispatch to the same superslice. If this pairing cannot be achieved
     then it is best to pair vector insns together and vector load insns
     together.
31086 To aid in this pairing, vec_pairing maintains the current state with
31087 the following values:
31089 0 : Initial state, no vecload/vector pairing has been started.
	 1 : A vecload or vector insn has been issued and a candidate for
	     pairing has been found and moved to the end of the ready
	     list.  */
31094 if (type
== TYPE_VECLOAD
)
31096 /* Issued a vecload. */
31097 if (vec_pairing
== 0)
31099 int vecload_pos
= -1;
31100 /* We issued a single vecload, look for a vector insn to pair it
31101 with. If one isn't found, try to pair another vecload. */
31105 if (recog_memoized (ready
[pos
]) >= 0)
31107 type2
= get_attr_type (ready
[pos
]);
31108 if (is_power9_pairable_vec_type (type2
))
31110 /* Found a vector insn to pair with, move it to the
31111 end of the ready list so it is scheduled next. */
31113 for (i
= pos
; i
< lastpos
; i
++)
31114 ready
[i
] = ready
[i
+ 1];
31115 ready
[lastpos
] = tmp
;
31117 return cached_can_issue_more
;
31119 else if (type2
== TYPE_VECLOAD
&& vecload_pos
== -1)
31120 /* Remember position of first vecload seen. */
31125 if (vecload_pos
>= 0)
31127 /* Didn't find a vector to pair with but did find a vecload,
31128 move it to the end of the ready list. */
31129 tmp
= ready
[vecload_pos
];
31130 for (i
= vecload_pos
; i
< lastpos
; i
++)
31131 ready
[i
] = ready
[i
+ 1];
31132 ready
[lastpos
] = tmp
;
31134 return cached_can_issue_more
;
31138 else if (is_power9_pairable_vec_type (type
))
31140 /* Issued a vector operation. */
31141 if (vec_pairing
== 0)
31144 /* We issued a single vector insn, look for a vecload to pair it
31145 with. If one isn't found, try to pair another vector. */
31149 if (recog_memoized (ready
[pos
]) >= 0)
31151 type2
= get_attr_type (ready
[pos
]);
31152 if (type2
== TYPE_VECLOAD
)
31154 /* Found a vecload insn to pair with, move it to the
31155 end of the ready list so it is scheduled next. */
31157 for (i
= pos
; i
< lastpos
; i
++)
31158 ready
[i
] = ready
[i
+ 1];
31159 ready
[lastpos
] = tmp
;
31161 return cached_can_issue_more
;
31163 else if (is_power9_pairable_vec_type (type2
)
31165 /* Remember position of first vector insn seen. */
31172 /* Didn't find a vecload to pair with but did find a vector
31173 insn, move it to the end of the ready list. */
31174 tmp
= ready
[vec_pos
];
31175 for (i
= vec_pos
; i
< lastpos
; i
++)
31176 ready
[i
] = ready
[i
+ 1];
31177 ready
[lastpos
] = tmp
;
31179 return cached_can_issue_more
;
  /* We've either finished a vec/vecload pair, couldn't find an insn to
     continue the current pair, or the last insn had nothing to do with
     pairing.  In any case, reset the state.  */
31190 return cached_can_issue_more
;
31193 /* We are about to begin issuing insns for this clock cycle. */
31196 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED
, int sched_verbose
,
31197 rtx_insn
**ready ATTRIBUTE_UNUSED
,
31198 int *pn_ready ATTRIBUTE_UNUSED
,
31199 int clock_var ATTRIBUTE_UNUSED
)
31201 int n_ready
= *pn_ready
;
31204 fprintf (dump
, "// rs6000_sched_reorder :\n");
  /* Reorder the ready list, if the second to last ready insn
     is a nonpipeline insn.  */
31208 if (rs6000_cpu_attr
== CPU_CELL
&& n_ready
> 1)
31210 if (is_nonpipeline_insn (ready
[n_ready
- 1])
31211 && (recog_memoized (ready
[n_ready
- 2]) > 0))
31212 /* Simply swap first two insns. */
31213 std::swap (ready
[n_ready
- 1], ready
[n_ready
- 2]);
31216 if (rs6000_cpu
== PROCESSOR_POWER6
)
31217 load_store_pendulum
= 0;
31219 return rs6000_issue_rate ();
31222 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31225 rs6000_sched_reorder2 (FILE *dump
, int sched_verbose
, rtx_insn
**ready
,
31226 int *pn_ready
, int clock_var ATTRIBUTE_UNUSED
)
31229 fprintf (dump
, "// rs6000_sched_reorder2 :\n");
31231 /* For Power6, we need to handle some special cases to try and keep the
31232 store queue from overflowing and triggering expensive flushes.
31234 This code monitors how load and store instructions are being issued
31235 and skews the ready list one way or the other to increase the likelihood
31236 that a desired instruction is issued at the proper time.
31238 A couple of things are done. First, we maintain a "load_store_pendulum"
31239 to track the current state of load/store issue.
31241 - If the pendulum is at zero, then no loads or stores have been
31242 issued in the current cycle so we do nothing.
31244 - If the pendulum is 1, then a single load has been issued in this
       cycle and we attempt to locate another load in the ready list to
       issue with it.
31248 - If the pendulum is -2, then two stores have already been
31249 issued in this cycle, so we increase the priority of the first load
       in the ready list to increase its likelihood of being chosen first
       in the next cycle.
31253 - If the pendulum is -1, then a single store has been issued in this
31254 cycle and we attempt to locate another store in the ready list to
31255 issue with it, preferring a store to an adjacent memory location to
31256 facilitate store pairing in the store queue.
31258 - If the pendulum is 2, then two loads have already been
31259 issued in this cycle, so we increase the priority of the first store
       in the ready list to increase its likelihood of being chosen first
       in the next cycle.
31263 - If the pendulum < -2 or > 2, then do nothing.
31265 Note: This code covers the most common scenarios. There exist non
31266 load/store instructions which make use of the LSU and which
31267 would need to be accounted for to strictly model the behavior
31268 of the machine. Those instructions are currently unaccounted
31269 for to help minimize compile time overhead of this code.
31271 if (rs6000_cpu
== PROCESSOR_POWER6
&& last_scheduled_insn
)
31276 rtx load_mem
, str_mem
;
31278 if (is_store_insn (last_scheduled_insn
, &str_mem
))
31279 /* Issuing a store, swing the load_store_pendulum to the left */
31280 load_store_pendulum
--;
31281 else if (is_load_insn (last_scheduled_insn
, &load_mem
))
31282 /* Issuing a load, swing the load_store_pendulum to the right */
31283 load_store_pendulum
++;
31285 return cached_can_issue_more
;
31287 /* If the pendulum is balanced, or there is only one instruction on
31288 the ready list, then all is well, so return. */
31289 if ((load_store_pendulum
== 0) || (*pn_ready
<= 1))
31290 return cached_can_issue_more
;
31292 if (load_store_pendulum
== 1)
31294 /* A load has been issued in this cycle. Scan the ready list
31295 for another load to issue with it */
31300 if (is_load_insn (ready
[pos
], &load_mem
))
31302 /* Found a load. Move it to the head of the ready list,
	     and adjust its priority so that it is more likely to
31306 for (i
=pos
; i
<*pn_ready
-1; i
++)
31307 ready
[i
] = ready
[i
+ 1];
31308 ready
[*pn_ready
-1] = tmp
;
31310 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31311 INSN_PRIORITY (tmp
)++;
31317 else if (load_store_pendulum
== -2)
31319 /* Two stores have been issued in this cycle. Increase the
31320 priority of the first load in the ready list to favor it for
31321 issuing in the next cycle. */
31326 if (is_load_insn (ready
[pos
], &load_mem
)
31328 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31330 INSN_PRIORITY (ready
[pos
])++;
31332 /* Adjust the pendulum to account for the fact that a load
31333 was found and increased in priority. This is to prevent
31334 increasing the priority of multiple loads */
31335 load_store_pendulum
--;
31342 else if (load_store_pendulum
== -1)
31344 /* A store has been issued in this cycle. Scan the ready list for
31345 another store to issue with it, preferring a store to an adjacent
31347 int first_store_pos
= -1;
31353 if (is_store_insn (ready
[pos
], &str_mem
))
31356 /* Maintain the index of the first store found on the
31358 if (first_store_pos
== -1)
31359 first_store_pos
= pos
;
31361 if (is_store_insn (last_scheduled_insn
, &str_mem2
)
31362 && adjacent_mem_locations (str_mem
, str_mem2
))
31364 /* Found an adjacent store. Move it to the head of the
		 ready list, and adjust its priority so that it is
31366 more likely to stay there */
31368 for (i
=pos
; i
<*pn_ready
-1; i
++)
31369 ready
[i
] = ready
[i
+ 1];
31370 ready
[*pn_ready
-1] = tmp
;
31372 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31373 INSN_PRIORITY (tmp
)++;
31375 first_store_pos
= -1;
31383 if (first_store_pos
>= 0)
31385 /* An adjacent store wasn't found, but a non-adjacent store was,
31386 so move the non-adjacent store to the front of the ready
31387 list, and adjust its priority so that it is more likely to
31389 tmp
= ready
[first_store_pos
];
31390 for (i
=first_store_pos
; i
<*pn_ready
-1; i
++)
31391 ready
[i
] = ready
[i
+ 1];
31392 ready
[*pn_ready
-1] = tmp
;
31393 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp
))
31394 INSN_PRIORITY (tmp
)++;
31397 else if (load_store_pendulum
== 2)
31399 /* Two loads have been issued in this cycle. Increase the priority
31400 of the first store in the ready list to favor it for issuing in
31406 if (is_store_insn (ready
[pos
], &str_mem
)
31408 && INSN_PRIORITY_KNOWN (ready
[pos
]))
31410 INSN_PRIORITY (ready
[pos
])++;
31412 /* Adjust the pendulum to account for the fact that a store
31413 was found and increased in priority. This is to prevent
31414 increasing the priority of multiple stores */
31415 load_store_pendulum
++;
31424 /* Do Power9 dependent reordering if necessary. */
31425 if (rs6000_cpu
== PROCESSOR_POWER9
&& last_scheduled_insn
31426 && recog_memoized (last_scheduled_insn
) >= 0)
31427 return power9_sched_reorder2 (ready
, *pn_ready
- 1);
31429 return cached_can_issue_more
;
31432 /* Return whether the presence of INSN causes a dispatch group termination
31433 of group WHICH_GROUP.
31435 If WHICH_GROUP == current_group, this function will return true if INSN
31436 causes the termination of the current group (i.e, the dispatch group to
31437 which INSN belongs). This means that INSN will be the last insn in the
31438 group it belongs to.
31440 If WHICH_GROUP == previous_group, this function will return true if INSN
31441 causes the termination of the previous group (i.e, the dispatch group that
31442 precedes the group to which INSN belongs). This means that INSN will be
   the first insn in the group it belongs to.  */
31446 insn_terminates_group_p (rtx_insn
*insn
, enum group_termination which_group
)
31453 first
= insn_must_be_first_in_group (insn
);
31454 last
= insn_must_be_last_in_group (insn
);
31459 if (which_group
== current_group
)
31461 else if (which_group
== previous_group
)
31469 insn_must_be_first_in_group (rtx_insn
*insn
)
31471 enum attr_type type
;
31475 || DEBUG_INSN_P (insn
)
31476 || GET_CODE (PATTERN (insn
)) == USE
31477 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31480 switch (rs6000_cpu
)
31482 case PROCESSOR_POWER5
:
31483 if (is_cracked_insn (insn
))
31486 case PROCESSOR_POWER4
:
31487 if (is_microcoded_insn (insn
))
31490 if (!rs6000_sched_groups
)
31493 type
= get_attr_type (insn
);
31500 case TYPE_DELAYED_CR
:
31501 case TYPE_CR_LOGICAL
:
31514 case PROCESSOR_POWER6
:
31515 type
= get_attr_type (insn
);
31524 case TYPE_FPCOMPARE
:
31535 if (get_attr_dot (insn
) == DOT_NO
31536 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31541 if (get_attr_size (insn
) == SIZE_32
)
31549 if (get_attr_update (insn
) == UPDATE_YES
)
31557 case PROCESSOR_POWER7
:
31558 type
= get_attr_type (insn
);
31562 case TYPE_CR_LOGICAL
:
31576 if (get_attr_dot (insn
) == DOT_YES
)
31581 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31582 || get_attr_update (insn
) == UPDATE_YES
)
31589 if (get_attr_update (insn
) == UPDATE_YES
)
31597 case PROCESSOR_POWER8
:
31598 type
= get_attr_type (insn
);
31602 case TYPE_CR_LOGICAL
:
31603 case TYPE_DELAYED_CR
:
31611 case TYPE_VECSTORE
:
31618 if (get_attr_dot (insn
) == DOT_YES
)
31623 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31624 || get_attr_update (insn
) == UPDATE_YES
)
31629 if (get_attr_update (insn
) == UPDATE_YES
31630 && get_attr_indexed (insn
) == INDEXED_YES
)
31646 insn_must_be_last_in_group (rtx_insn
*insn
)
31648 enum attr_type type
;
31652 || DEBUG_INSN_P (insn
)
31653 || GET_CODE (PATTERN (insn
)) == USE
31654 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
31657 switch (rs6000_cpu
) {
31658 case PROCESSOR_POWER4
:
31659 case PROCESSOR_POWER5
:
31660 if (is_microcoded_insn (insn
))
31663 if (is_branch_slot_insn (insn
))
31667 case PROCESSOR_POWER6
:
31668 type
= get_attr_type (insn
);
31676 case TYPE_FPCOMPARE
:
31687 if (get_attr_dot (insn
) == DOT_NO
31688 || get_attr_var_shift (insn
) == VAR_SHIFT_NO
)
31693 if (get_attr_size (insn
) == SIZE_32
)
31701 case PROCESSOR_POWER7
:
31702 type
= get_attr_type (insn
);
31712 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31713 && get_attr_update (insn
) == UPDATE_YES
)
31718 if (get_attr_update (insn
) == UPDATE_YES
31719 && get_attr_indexed (insn
) == INDEXED_YES
)
31727 case PROCESSOR_POWER8
:
31728 type
= get_attr_type (insn
);
31740 if (get_attr_sign_extend (insn
) == SIGN_EXTEND_YES
31741 && get_attr_update (insn
) == UPDATE_YES
)
31746 if (get_attr_update (insn
) == UPDATE_YES
31747 && get_attr_indexed (insn
) == INDEXED_YES
)
31762 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31763 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31766 is_costly_group (rtx
*group_insns
, rtx next_insn
)
31769 int issue_rate
= rs6000_issue_rate ();
31771 for (i
= 0; i
< issue_rate
; i
++)
31773 sd_iterator_def sd_it
;
31775 rtx insn
= group_insns
[i
];
31780 FOR_EACH_DEP (insn
, SD_LIST_RES_FORW
, sd_it
, dep
)
31782 rtx next
= DEP_CON (dep
);
31784 if (next
== next_insn
31785 && rs6000_is_costly_dependence (dep
, dep_cost (dep
), 0))
31793 /* Utility of the function redefine_groups.
31794 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31795 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31796 to keep it "far" (in a separate group) from GROUP_INSNS, following
31797 one of the following schemes, depending on the value of the flag
31798 -minsert_sched_nops = X:
31799 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31800 in order to force NEXT_INSN into a separate group.
31801 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31802 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31803 insertion (has a group just ended, how many vacant issue slots remain in the
31804 last group, and how many dispatch groups were encountered so far). */
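/* Worked example (illustrative): with a hypothetical dispatch width of 5
   and -minsert-sched-nops=sched_finish_regroup_exact, if three issue slots
   are still vacant and next_insn is not a branch, then can_issue_more - 1
   = 2 nops are enough to push it into a new group, because the remaining
   slot can only take a branch anyway.  */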
31807 force_new_group (int sched_verbose
, FILE *dump
, rtx
*group_insns
,
31808 rtx_insn
*next_insn
, bool *group_end
, int can_issue_more
,
31813 int issue_rate
= rs6000_issue_rate ();
31814 bool end
= *group_end
;
31817 if (next_insn
== NULL_RTX
|| DEBUG_INSN_P (next_insn
))
31818 return can_issue_more
;
31820 if (rs6000_sched_insert_nops
> sched_finish_regroup_exact
)
31821 return can_issue_more
;
31823 force
= is_costly_group (group_insns
, next_insn
);
31825 return can_issue_more
;
31827 if (sched_verbose
> 6)
31828 fprintf (dump
,"force: group count = %d, can_issue_more = %d\n",
31829 *group_count
,can_issue_more
);
31831 if (rs6000_sched_insert_nops
== sched_finish_regroup_exact
)
31834 can_issue_more
= 0;
31836 /* Since only a branch can be issued in the last issue_slot, it is
31837 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31838 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31839 in this case the last nop will start a new group and the branch
31840 will be forced to the new group. */
31841 if (can_issue_more
&& !is_branch_slot_insn (next_insn
))
31844 /* Do we have a special group ending nop? */
31845 if (rs6000_cpu_attr
== CPU_POWER6
|| rs6000_cpu_attr
== CPU_POWER7
31846 || rs6000_cpu_attr
== CPU_POWER8
)
31848 nop
= gen_group_ending_nop ();
31849 emit_insn_before (nop
, next_insn
);
31850 can_issue_more
= 0;
31853 while (can_issue_more
> 0)
31856 emit_insn_before (nop
, next_insn
);
31864 if (rs6000_sched_insert_nops
< sched_finish_regroup_exact
)
31866 int n_nops
= rs6000_sched_insert_nops
;
31868 /* Nops can't be issued from the branch slot, so the effective
31869 issue_rate for nops is 'issue_rate - 1'. */
31870 if (can_issue_more
== 0)
31871 can_issue_more
= issue_rate
;
31873 if (can_issue_more
== 0)
31875 can_issue_more
= issue_rate
- 1;
31878 for (i
= 0; i
< issue_rate
; i
++)
31880 group_insns
[i
] = 0;
31887 emit_insn_before (nop
, next_insn
);
31888 if (can_issue_more
== issue_rate
- 1) /* new group begins */
31891 if (can_issue_more
== 0)
31893 can_issue_more
= issue_rate
- 1;
31896 for (i
= 0; i
< issue_rate
; i
++)
31898 group_insns
[i
] = 0;
31904 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31907 /* Is next_insn going to start a new group? */
31910 || (can_issue_more
== 1 && !is_branch_slot_insn (next_insn
))
31911 || (can_issue_more
<= 2 && is_cracked_insn (next_insn
))
31912 || (can_issue_more
< issue_rate
&&
31913 insn_terminates_group_p (next_insn
, previous_group
)));
31914 if (*group_end
&& end
)
31917 if (sched_verbose
> 6)
31918 fprintf (dump
, "done force: group count = %d, can_issue_more = %d\n",
31919 *group_count
, can_issue_more
);
31920 return can_issue_more
;
31923 return can_issue_more
;
31926 /* This function tries to synch the dispatch groups that the compiler "sees"
31927 with the dispatch groups that the processor dispatcher is expected to
31928 form in practice. It tries to achieve this synchronization by forcing the
31929 estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the processor).
31932 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31933 examines the (estimated) dispatch groups that will be formed by the processor
31934 dispatcher. It marks these group boundaries to reflect the estimated
31935 processor grouping, overriding the grouping that the scheduler had marked.
31936 Depending on the value of the flag '-minsert-sched-nops' this function can
31937 force certain insns into separate groups or force a certain distance between
31938 them by inserting nops, for example, if there exists a "costly dependence"
31941 The function estimates the group boundaries that the processor will form as
31942 follows: It keeps track of how many vacant issue slots are available after
31943 each insn. A subsequent insn will start a new group if one of the following
31945 - no more vacant issue slots remain in the current dispatch group.
31946 - only the last issue slot, which is the branch slot, is vacant, but the next
31947 insn is not a branch.
31948 - only the last 2 or less issue slots, including the branch slot, are vacant,
31949 which means that a cracked insn (which occupies two issue slots) can't be
31950 issued in this group.
31951 - less than 'issue_rate' slots are vacant, and the next insn always needs to
31952 start a new group. */
static int
redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
		 rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    group_insns[i] = 0;
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
	can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
	return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
	= (can_issue_more == 0
	   || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
	   || (can_issue_more <= 2 && is_cracked_insn (next_insn))
	   || (can_issue_more < issue_rate
	       && insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
					next_insn, &group_end, can_issue_more,
					&group_count);

      if (group_end)
	{
	  group_count++;
	  can_issue_more = 0;
	  for (i = 0; i < issue_rate; i++)
	    group_insns[i] = 0;
	}

      if (GET_MODE (next_insn) == TImode && can_issue_more)
	PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
	PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
	can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */
static int
pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
	    rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
	rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
	break;

      if (group_end)
	{
	  /* If the scheduler had marked group termination at this location
	     (between insn and next_insn), and neither insn nor next_insn will
	     force group termination, pad the group with nops to force group
	     termination.  */
	  if (can_issue_more
	      && (rs6000_sched_insert_nops == sched_finish_pad_groups)
	      && !insn_terminates_group_p (insn, current_group)
	      && !insn_terminates_group_p (next_insn, previous_group))
	    {
	      if (!is_branch_slot_insn (next_insn))
		can_issue_more--;

	      while (can_issue_more)
		{
		  nop = gen_nop ();
		  emit_insn_before (nop, next_insn);
		  can_issue_more--;
		}
	    }

	  can_issue_more = issue_rate;
	  group_count++;
	}

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  load_store_pendulum = 0;
  divide_cnt = 0;
  vec_pairing = 0;
}

/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
	return;

      if (rs6000_sched_insert_nops == sched_finish_none)
	return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
	n_groups = pad_groups (dump, sched_verbose,
			       current_sched_info->prev_head,
			       current_sched_info->next_tail);
      else
	n_groups = redefine_groups (dump, sched_verbose,
				    current_sched_info->prev_head,
				    current_sched_info->next_tail);

      if (sched_verbose >= 6)
	{
	  fprintf (dump, "ngroups = %d\n", n_groups);
	  print_rtl (dump, current_sched_info->prev_head);
	  fprintf (dump, "Done finish_sched\n");
	}
    }
}
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL;
      sc->load_store_pendulum = 0;
      sc->divide_cnt = 0;
      sc->vec_pairing = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
      sc->divide_cnt = divide_cnt;
      sc->vec_pairing = vec_pairing;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
  divide_cnt = sc->divide_cnt;
  vec_pairing = sc->vec_pairing;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
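
/* Usage sketch (assumption: these functions are wired up as the
   TARGET_SCHED_{ALLOC,INIT,SET,FREE}_SCHED_CONTEXT hooks elsewhere in this
   file; the snippet only illustrates the calling order expected from the
   selective scheduler and is not code from this file):

     void *ctx = rs6000_alloc_sched_context ();
     rs6000_init_sched_context (ctx, true);   // start from a clean state
     rs6000_set_sched_context (ctx);          // make it the global state
     ...
     rs6000_free_sched_context (ctx);
*/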
32213 rs6000_sched_can_speculate_insn (rtx_insn
*insn
)
32215 switch (get_attr_type (insn
))
/* Length in units of the trampoline for entering a nested function.  */

int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
	rtx fnmem, fn_reg, toc_reg;

	if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
	  error ("you cannot take the address of a nested function if you use "
		 "the %qs option", "-mno-pointers-to-nested-functions");

	fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
	fn_reg = gen_reg_rtx (Pmode);
	toc_reg = gen_reg_rtx (Pmode);

	/* Macro to shorten the code expansions below.  */
#define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

	m_tramp = replace_equiv_address (m_tramp, addr);

	emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
	emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
	emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
	emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
	emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);

#undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
			 LCT_NORMAL, VOIDmode, 4,
			 addr, Pmode,
			 GEN_INT (rs6000_trampoline_size ()), SImode,
			 fnaddr, Pmode,
			 ctx_reg, Pmode);
      break;
    }
}
32320 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32321 identifier as an argument, so the front end shouldn't look it up. */
32324 rs6000_attribute_takes_identifier_p (const_tree attr_id
)
32326 return is_attribute_p ("altivec", attr_id
);
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

	__attribute__((altivec(vector__)))
	__attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
	__attribute__((altivec(bool__)))	(always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
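
/* For example, the source-level type 'vector unsigned int' reaches this
   handler as a plain 'unsigned int' carrying
   __attribute__((altivec(vector__))); roughly, 'vector bool char' applies
   the attribute twice, once with 'bool__' and once with 'vector__', on an
   unsigned base type.  */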
32340 rs6000_handle_altivec_attribute (tree
*node
,
32341 tree name ATTRIBUTE_UNUSED
,
32343 int flags ATTRIBUTE_UNUSED
,
32344 bool *no_add_attrs
)
32346 tree type
= *node
, result
= NULL_TREE
;
32350 = ((args
&& TREE_CODE (args
) == TREE_LIST
&& TREE_VALUE (args
)
32351 && TREE_CODE (TREE_VALUE (args
)) == IDENTIFIER_NODE
)
32352 ? *IDENTIFIER_POINTER (TREE_VALUE (args
))
32355 while (POINTER_TYPE_P (type
)
32356 || TREE_CODE (type
) == FUNCTION_TYPE
32357 || TREE_CODE (type
) == METHOD_TYPE
32358 || TREE_CODE (type
) == ARRAY_TYPE
)
32359 type
= TREE_TYPE (type
);
32361 mode
= TYPE_MODE (type
);
32363 /* Check for invalid AltiVec type qualifiers. */
32364 if (type
== long_double_type_node
)
32365 error ("use of %<long double%> in AltiVec types is invalid");
32366 else if (type
== boolean_type_node
)
32367 error ("use of boolean types in AltiVec types is invalid");
32368 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
32369 error ("use of %<complex%> in AltiVec types is invalid");
32370 else if (DECIMAL_FLOAT_MODE_P (mode
))
32371 error ("use of decimal floating point types in AltiVec types is invalid");
32372 else if (!TARGET_VSX
)
32374 if (type
== long_unsigned_type_node
|| type
== long_integer_type_node
)
32377 error ("use of %<long%> in AltiVec types is invalid for "
32378 "64-bit code without %qs", "-mvsx");
32379 else if (rs6000_warn_altivec_long
)
32380 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32383 else if (type
== long_long_unsigned_type_node
32384 || type
== long_long_integer_type_node
)
32385 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32387 else if (type
== double_type_node
)
32388 error ("use of %<double%> in AltiVec types is invalid without %qs",
32392 switch (altivec_type
)
32395 unsigned_p
= TYPE_UNSIGNED (type
);
32399 result
= (unsigned_p
? unsigned_V1TI_type_node
: V1TI_type_node
);
32402 result
= (unsigned_p
? unsigned_V2DI_type_node
: V2DI_type_node
);
32405 result
= (unsigned_p
? unsigned_V4SI_type_node
: V4SI_type_node
);
32408 result
= (unsigned_p
? unsigned_V8HI_type_node
: V8HI_type_node
);
32411 result
= (unsigned_p
? unsigned_V16QI_type_node
: V16QI_type_node
);
32413 case SFmode
: result
= V4SF_type_node
; break;
32414 case DFmode
: result
= V2DF_type_node
; break;
32415 /* If the user says 'vector int bool', we may be handed the 'bool'
32416 attribute _before_ the 'vector' attribute, and so select the
32417 proper type in the 'b' case below. */
32418 case V4SImode
: case V8HImode
: case V16QImode
: case V4SFmode
:
32419 case V2DImode
: case V2DFmode
:
32427 case DImode
: case V2DImode
: result
= bool_V2DI_type_node
; break;
32428 case SImode
: case V4SImode
: result
= bool_V4SI_type_node
; break;
32429 case HImode
: case V8HImode
: result
= bool_V8HI_type_node
; break;
32430 case QImode
: case V16QImode
: result
= bool_V16QI_type_node
;
32437 case V8HImode
: result
= pixel_V8HI_type_node
;
32443 /* Propagate qualifiers attached to the element type
32444 onto the vector type. */
32445 if (result
&& result
!= type
&& TYPE_QUALS (type
))
32446 result
= build_qualified_type (result
, TYPE_QUALS (type
));
32448 *no_add_attrs
= true; /* No need to hang on to the attribute. */
32451 *node
= lang_hooks
.types
.reconstruct_complex_type (*node
, result
);
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Use a unique name for __float128 rather than trying to use "e" or "g".
     Use "g" for IBM extended double, no matter whether it is long double
     (using -mabi=ibmlongdouble) or the distinct __ibm128 type.  */
  if (TARGET_FLOAT128_TYPE)
    {
      if (type == ieee128_float_type_node)
	return "U10__float128";

      if (type == ibm128_float_type_node)
	return "g";

      if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
	return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
    }

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
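
/* Mangling example (for illustration only; the enclosing vector mangling is
   produced by the language-independent C++ mangler, not here): the element
   type of 'vector bool int' is emitted as "U6__booli", and the element type
   of 'vector pixel' as "u7__pixel".  */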
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
				  tree args ATTRIBUTE_UNUSED,
				  int flags ATTRIBUTE_UNUSED,
				  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */
static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
	  || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
					NULL_TREE,
					TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}

#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
				tree args ATTRIBUTE_UNUSED,
				int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
	type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
		 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
	    && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
	   || ((is_attribute_p ("gcc_struct", name)
		&& lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT
	  && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
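
/* Example use of the attributes handled above:
     struct __attribute__((ms_struct)) S { char c; int i : 7; };
   selects the Microsoft bit-field layout for S when
   TARGET_USE_MS_BITFIELD_LAYOUT permits it, while gcc_struct forces the
   default GCC layout back on; applying both to the same type triggers the
   "incompatible attribute ignored" warning above.  */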
32613 #ifdef USING_ELFOS_H
32615 /* A get_unnamed_section callback, used for switching to toc_section. */
32618 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
32620 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
32621 && TARGET_MINIMAL_TOC
)
32623 if (!toc_initialized
)
32625 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
32626 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32627 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "LCTOC", 0);
32628 fprintf (asm_out_file
, "\t.tc ");
32629 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1[TC],");
32630 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32631 fprintf (asm_out_file
, "\n");
32633 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32634 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32635 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32636 fprintf (asm_out_file
, " = .+32768\n");
32637 toc_initialized
= 1;
32640 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32642 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
32644 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
32645 if (!toc_initialized
)
32647 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32648 toc_initialized
= 1;
32653 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
32654 if (!toc_initialized
)
32656 ASM_OUTPUT_ALIGN (asm_out_file
, TARGET_64BIT
? 3 : 2);
32657 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
32658 fprintf (asm_out_file
, " = .+32768\n");
32659 toc_initialized
= 1;
32664 /* Implement TARGET_ASM_INIT_SECTIONS. */
32667 rs6000_elf_asm_init_sections (void)
32670 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op
, NULL
);
32673 = get_unnamed_section (SECTION_WRITE
, output_section_asm_op
,
32674 SDATA2_SECTION_ASM_OP
);
32677 /* Implement TARGET_SELECT_RTX_SECTION. */
32680 rs6000_elf_select_rtx_section (machine_mode mode
, rtx x
,
32681 unsigned HOST_WIDE_INT align
)
32683 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
32684 return toc_section
;
32686 return default_elf_select_rtx_section (mode
, x
, align
);
32689 /* For a SYMBOL_REF, set generic flags and then perform some
32690 target-specific processing.
32692 When the AIX ABI is requested on a non-AIX system, replace the
32693 function name with the real name (with a leading .) rather than the
32694 function descriptor name. This saves a lot of overriding code to
32695 read the prefixes. */
32697 static void rs6000_elf_encode_section_info (tree
, rtx
, int) ATTRIBUTE_UNUSED
;
32699 rs6000_elf_encode_section_info (tree decl
, rtx rtl
, int first
)
32701 default_encode_section_info (decl
, rtl
, first
);
32704 && TREE_CODE (decl
) == FUNCTION_DECL
32706 && DEFAULT_ABI
== ABI_AIX
)
32708 rtx sym_ref
= XEXP (rtl
, 0);
32709 size_t len
= strlen (XSTR (sym_ref
, 0));
32710 char *str
= XALLOCAVEC (char, len
+ 2);
32712 memcpy (str
+ 1, XSTR (sym_ref
, 0), len
+ 1);
32713 XSTR (sym_ref
, 0) = ggc_alloc_string (str
, len
+ 1);
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
	  && (section[len] == 0 || section[len] == '.'));
}

static bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = DECL_SECTION_NAME (decl);
      if (compare_section_name (section, ".sdata")
	  || compare_section_name (section, ".sdata2")
	  || compare_section_name (section, ".gnu.linkonce.s")
	  || compare_section_name (section, ".sbss")
	  || compare_section_name (section, ".sbss2")
	  || compare_section_name (section, ".gnu.linkonce.sb")
	  || strcmp (section, ".PPC.EMB.sdata0") == 0
	  || strcmp (section, ".PPC.EMB.sbss0") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
	  && size <= g_switch_value
	  /* If it's not public, and we're not going to reference it there,
	     there's no need to put it in the small data section.  */
	  && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
	return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbols refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != 0)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != 0)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
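
/* Example (register numbers chosen only for illustration): for an address
   of the form (plus (reg r9) (const_int 16)) this returns r9; for
   (plus (plus (reg r9) (reg r10)) (const_int 4)) the constant operand is
   stripped first and r9 is returned.  r0 is never selected.  */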
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
32824 typedef struct branch_island_d
{
32825 tree function_name
;
32831 static vec
<branch_island
, va_gc
> *branch_islands
;
32833 /* Remember to generate a branch island for far calls to the given
32837 add_compiler_branch_island (tree label_name
, tree function_name
,
32840 branch_island bi
= {function_name
, label_name
, line_number
};
32841 vec_safe_push (branch_islands
, bi
);
32844 /* Generate far-jump branch islands for everything recorded in
32845 branch_islands. Invoked immediately after the last instruction of
32846 the epilogue has been emitted; the branch islands must be appended
32847 to, and contiguous with, the function body. Mach-O stubs are
32848 generated in machopic_output_stub(). */
32851 macho_branch_islands (void)
32855 while (!vec_safe_is_empty (branch_islands
))
32857 branch_island
*bi
= &branch_islands
->last ();
32858 const char *label
= IDENTIFIER_POINTER (bi
->label_name
);
32859 const char *name
= IDENTIFIER_POINTER (bi
->function_name
);
32860 char name_buf
[512];
32861 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32862 if (name
[0] == '*' || name
[0] == '&')
32863 strcpy (name_buf
, name
+1);
32867 strcpy (name_buf
+1, name
);
32869 strcpy (tmp_buf
, "\n");
32870 strcat (tmp_buf
, label
);
32871 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32872 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
32873 dbxout_stabd (N_SLINE
, bi
->line_number
);
32874 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32877 if (TARGET_LINK_STACK
)
32880 get_ppc476_thunk_name (name
);
32881 strcat (tmp_buf
, ":\n\tmflr r0\n\tbl ");
32882 strcat (tmp_buf
, name
);
32883 strcat (tmp_buf
, "\n");
32884 strcat (tmp_buf
, label
);
32885 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
32889 strcat (tmp_buf
, ":\n\tmflr r0\n\tbcl 20,31,");
32890 strcat (tmp_buf
, label
);
32891 strcat (tmp_buf
, "_pic\n");
32892 strcat (tmp_buf
, label
);
32893 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
32896 strcat (tmp_buf
, "\taddis r11,r11,ha16(");
32897 strcat (tmp_buf
, name_buf
);
32898 strcat (tmp_buf
, " - ");
32899 strcat (tmp_buf
, label
);
32900 strcat (tmp_buf
, "_pic)\n");
32902 strcat (tmp_buf
, "\tmtlr r0\n");
32904 strcat (tmp_buf
, "\taddi r12,r11,lo16(");
32905 strcat (tmp_buf
, name_buf
);
32906 strcat (tmp_buf
, " - ");
32907 strcat (tmp_buf
, label
);
32908 strcat (tmp_buf
, "_pic)\n");
32910 strcat (tmp_buf
, "\tmtctr r12\n\tbctr\n");
32914 strcat (tmp_buf
, ":\nlis r12,hi16(");
32915 strcat (tmp_buf
, name_buf
);
32916 strcat (tmp_buf
, ")\n\tori r12,r12,lo16(");
32917 strcat (tmp_buf
, name_buf
);
32918 strcat (tmp_buf
, ")\n\tmtctr r12\n\tbctr");
32920 output_asm_insn (tmp_buf
, 0);
32921 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32922 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
32923 dbxout_stabd (N_SLINE
, bi
->line_number
);
32924 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32925 branch_islands
->pop ();
/* NO_PREVIOUS_DEF checks in the linked list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
32959 /* INSN is either a function call or a millicode call. It may have an
32960 unconditional jump in its delay slot.
32962 CALL_DEST is the routine we are calling. */
32965 output_call (rtx_insn
*insn
, rtx
*operands
, int dest_operand_number
,
32966 int cookie_operand_number
)
32968 static char buf
[256];
32969 if (darwin_emit_branch_islands
32970 && GET_CODE (operands
[dest_operand_number
]) == SYMBOL_REF
32971 && (INTVAL (operands
[cookie_operand_number
]) & CALL_LONG
))
32974 tree funname
= get_identifier (XSTR (operands
[dest_operand_number
], 0));
32976 if (no_previous_def (funname
))
32978 rtx label_rtx
= gen_label_rtx ();
32979 char *label_buf
, temp_buf
[256];
32980 ASM_GENERATE_INTERNAL_LABEL (temp_buf
, "L",
32981 CODE_LABEL_NUMBER (label_rtx
));
32982 label_buf
= temp_buf
[0] == '*' ? temp_buf
+ 1 : temp_buf
;
32983 labelname
= get_identifier (label_buf
);
32984 add_compiler_branch_island (labelname
, funname
, insn_line (insn
));
32987 labelname
= get_prev_label (funname
);
32989 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32990 instruction will reach 'foo', otherwise link as 'bl L42'".
32991 "L42" should be a 'branch island', that will do a far jump to
32992 'foo'. Branch islands are generated in
32993 macho_branch_islands(). */
32994 sprintf (buf
, "jbsr %%z%d,%.246s",
32995 dest_operand_number
, IDENTIFIER_POINTER (labelname
));
32998 sprintf (buf
, "bl %%z%d", dest_operand_number
);
33002 /* Generate PIC and indirect symbol stubs. */
33005 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
33007 unsigned int length
;
33008 char *symbol_name
, *lazy_ptr_name
;
33009 char *local_label_0
;
33010 static int label
= 0;
33012 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33013 symb
= (*targetm
.strip_name_encoding
) (symb
);
33016 length
= strlen (symb
);
33017 symbol_name
= XALLOCAVEC (char, length
+ 32);
33018 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
33020 lazy_ptr_name
= XALLOCAVEC (char, length
+ 32);
33021 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name
, symb
, length
);
33024 switch_to_section (darwin_sections
[machopic_picsymbol_stub1_section
]);
33026 switch_to_section (darwin_sections
[machopic_symbol_stub1_section
]);
33030 fprintf (file
, "\t.align 5\n");
33032 fprintf (file
, "%s:\n", stub
);
33033 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33036 local_label_0
= XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33037 sprintf (local_label_0
, "\"L%011d$spb\"", label
);
33039 fprintf (file
, "\tmflr r0\n");
33040 if (TARGET_LINK_STACK
)
33043 get_ppc476_thunk_name (name
);
33044 fprintf (file
, "\tbl %s\n", name
);
33045 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33049 fprintf (file
, "\tbcl 20,31,%s\n", local_label_0
);
33050 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
33052 fprintf (file
, "\taddis r11,r11,ha16(%s-%s)\n",
33053 lazy_ptr_name
, local_label_0
);
33054 fprintf (file
, "\tmtlr r0\n");
33055 fprintf (file
, "\t%s r12,lo16(%s-%s)(r11)\n",
33056 (TARGET_64BIT
? "ldu" : "lwzu"),
33057 lazy_ptr_name
, local_label_0
);
33058 fprintf (file
, "\tmtctr r12\n");
33059 fprintf (file
, "\tbctr\n");
33063 fprintf (file
, "\t.align 4\n");
33065 fprintf (file
, "%s:\n", stub
);
33066 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33068 fprintf (file
, "\tlis r11,ha16(%s)\n", lazy_ptr_name
);
33069 fprintf (file
, "\t%s r12,lo16(%s)(r11)\n",
33070 (TARGET_64BIT
? "ldu" : "lwzu"),
33072 fprintf (file
, "\tmtctr r12\n");
33073 fprintf (file
, "\tbctr\n");
33076 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr_section
]);
33077 fprintf (file
, "%s:\n", lazy_ptr_name
);
33078 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
33079 fprintf (file
, "%sdyld_stub_binding_helper\n",
33080 (TARGET_64BIT
? DOUBLE_INT_ASM_OP
: "\t.long\t"));
33083 /* Legitimize PIC addresses. If the address is already
33084 position-independent, we return ORIG. Newly generated
33085 position-independent addresses go into a reg. This is REG if non
33086 zero, otherwise we allocate register(s) as necessary. */
33088 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33091 rs6000_machopic_legitimize_pic_address (rtx orig
, machine_mode mode
,
33096 if (reg
== NULL
&& !reload_completed
)
33097 reg
= gen_reg_rtx (Pmode
);
33099 if (GET_CODE (orig
) == CONST
)
33103 if (GET_CODE (XEXP (orig
, 0)) == PLUS
33104 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
33107 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
33109 /* Use a different reg for the intermediate value, as
33110 it will be marked UNCHANGING. */
33111 reg_temp
= !can_create_pseudo_p () ? reg
: gen_reg_rtx (Pmode
);
33112 base
= rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
33115 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
33118 if (GET_CODE (offset
) == CONST_INT
)
33120 if (SMALL_INT (offset
))
33121 return plus_constant (Pmode
, base
, INTVAL (offset
));
33122 else if (!reload_completed
)
33123 offset
= force_reg (Pmode
, offset
);
33126 rtx mem
= force_const_mem (Pmode
, orig
);
33127 return machopic_legitimize_pic_address (mem
, Pmode
, reg
);
33130 return gen_rtx_PLUS (Pmode
, base
, offset
);
33133 /* Fall back on generic machopic code. */
33134 return machopic_legitimize_pic_address (orig
, mode
, reg
);
33137 /* Output a .machine directive for the Darwin assembler, and call
33138 the generic start_file routine. */
33141 rs6000_darwin_file_start (void)
33143 static const struct
33147 HOST_WIDE_INT if_set
;
33149 { "ppc64", "ppc64", MASK_64BIT
},
33150 { "970", "ppc970", MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
33151 { "power4", "ppc970", 0 },
33152 { "G5", "ppc970", 0 },
33153 { "7450", "ppc7450", 0 },
33154 { "7400", "ppc7400", MASK_ALTIVEC
},
33155 { "G4", "ppc7400", 0 },
33156 { "750", "ppc750", 0 },
33157 { "740", "ppc750", 0 },
33158 { "G3", "ppc750", 0 },
33159 { "604e", "ppc604e", 0 },
33160 { "604", "ppc604", 0 },
33161 { "603e", "ppc603", 0 },
33162 { "603", "ppc603", 0 },
33163 { "601", "ppc601", 0 },
33164 { NULL
, "ppc", 0 } };
33165 const char *cpu_id
= "";
33168 rs6000_file_start ();
33169 darwin_file_start ();
33171 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33173 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
33174 cpu_id
= rs6000_default_cpu
;
33176 if (global_options_set
.x_rs6000_cpu_index
)
33177 cpu_id
= processor_target_table
[rs6000_cpu_index
].name
;
33179 /* Look through the mapping array. Pick the first name that either
33180 matches the argument, has a bit set in IF_SET that is also set
33181 in the target flags, or has a NULL name. */
33184 while (mapping
[i
].arg
!= NULL
33185 && strcmp (mapping
[i
].arg
, cpu_id
) != 0
33186 && (mapping
[i
].if_set
& rs6000_isa_flags
) == 0)
33189 fprintf (asm_out_file
, "\t.machine %s\n", mapping
[i
].name
);
33192 #endif /* TARGET_MACHO */
33196 rs6000_elf_reloc_rw_mask (void)
33200 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
33206 /* Record an element in the table of global constructors. SYMBOL is
33207 a SYMBOL_REF of the function to be called; PRIORITY is a number
33208 between 0 and MAX_INIT_PRIORITY.
33210 This differs from default_named_section_asm_out_constructor in
33211 that we have special handling for -mrelocatable. */
33213 static void rs6000_elf_asm_out_constructor (rtx
, int) ATTRIBUTE_UNUSED
;
33215 rs6000_elf_asm_out_constructor (rtx symbol
, int priority
)
33217 const char *section
= ".ctors";
33220 if (priority
!= DEFAULT_INIT_PRIORITY
)
33222 sprintf (buf
, ".ctors.%.5u",
33223 /* Invert the numbering so the linker puts us in the proper
33224 order; constructors are run from right to left, and the
33225 linker sorts in increasing order. */
33226 MAX_INIT_PRIORITY
- priority
);
33230 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33231 assemble_align (POINTER_SIZE
);
33233 if (DEFAULT_ABI
== ABI_V4
33234 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33236 fputs ("\t.long (", asm_out_file
);
33237 output_addr_const (asm_out_file
, symbol
);
33238 fputs (")@fixup\n", asm_out_file
);
33241 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33244 static void rs6000_elf_asm_out_destructor (rtx
, int) ATTRIBUTE_UNUSED
;
33246 rs6000_elf_asm_out_destructor (rtx symbol
, int priority
)
33248 const char *section
= ".dtors";
33251 if (priority
!= DEFAULT_INIT_PRIORITY
)
33253 sprintf (buf
, ".dtors.%.5u",
33254 /* Invert the numbering so the linker puts us in the proper
33255 order; constructors are run from right to left, and the
33256 linker sorts in increasing order. */
33257 MAX_INIT_PRIORITY
- priority
);
33261 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
33262 assemble_align (POINTER_SIZE
);
33264 if (DEFAULT_ABI
== ABI_V4
33265 && (TARGET_RELOCATABLE
|| flag_pic
> 1))
33267 fputs ("\t.long (", asm_out_file
);
33268 output_addr_const (asm_out_file
, symbol
);
33269 fputs (")@fixup\n", asm_out_file
);
33272 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
33276 rs6000_elf_declare_function_name (FILE *file
, const char *name
, tree decl
)
33278 if (TARGET_64BIT
&& DEFAULT_ABI
!= ABI_ELFv2
)
33280 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file
);
33281 ASM_OUTPUT_LABEL (file
, name
);
33282 fputs (DOUBLE_INT_ASM_OP
, file
);
33283 rs6000_output_function_entry (file
, name
);
33284 fputs (",.TOC.@tocbase,0\n\t.previous\n", file
);
33287 fputs ("\t.size\t", file
);
33288 assemble_name (file
, name
);
33289 fputs (",24\n\t.type\t.", file
);
33290 assemble_name (file
, name
);
33291 fputs (",@function\n", file
);
33292 if (TREE_PUBLIC (decl
) && ! DECL_WEAK (decl
))
33294 fputs ("\t.globl\t.", file
);
33295 assemble_name (file
, name
);
33300 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33301 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33302 rs6000_output_function_entry (file
, name
);
33303 fputs (":\n", file
);
33307 if (DEFAULT_ABI
== ABI_V4
33308 && (TARGET_RELOCATABLE
|| flag_pic
> 1)
33309 && !TARGET_SECURE_PLT
33310 && (!constant_pool_empty_p () || crtl
->profile
)
33315 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33317 fprintf (file
, "\t.long ");
33318 assemble_name (file
, toc_label_name
);
33321 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33322 assemble_name (file
, buf
);
33326 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
33327 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
33329 if (TARGET_CMODEL
== CMODEL_LARGE
&& rs6000_global_entry_point_needed_p ())
33333 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
33335 fprintf (file
, "\t.quad .TOC.-");
33336 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
33337 assemble_name (file
, buf
);
33341 if (DEFAULT_ABI
== ABI_AIX
)
33343 const char *desc_name
, *orig_name
;
33345 orig_name
= (*targetm
.strip_name_encoding
) (name
);
33346 desc_name
= orig_name
;
33347 while (*desc_name
== '.')
33350 if (TREE_PUBLIC (decl
))
33351 fprintf (file
, "\t.globl %s\n", desc_name
);
33353 fprintf (file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
33354 fprintf (file
, "%s:\n", desc_name
);
33355 fprintf (file
, "\t.long %s\n", orig_name
);
33356 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file
);
33357 fputs ("\t.long 0\n", file
);
33358 fprintf (file
, "\t.previous\n");
33360 ASM_OUTPUT_LABEL (file
, name
);
33363 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED
;
33365 rs6000_elf_file_end (void)
33367 #ifdef HAVE_AS_GNU_ATTRIBUTE
33368 /* ??? The value emitted depends on options active at file end.
33369 Assume anyone using #pragma or attributes that might change
33370 options knows what they are doing. */
33371 if ((TARGET_64BIT
|| DEFAULT_ABI
== ABI_V4
)
33372 && rs6000_passes_float
)
33378 else if (TARGET_SF_FPR
)
33382 if (rs6000_passes_long_double
)
33384 if (!TARGET_LONG_DOUBLE_128
)
33386 else if (TARGET_IEEEQUAD
)
33391 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n", fp
);
33393 if (TARGET_32BIT
&& DEFAULT_ABI
== ABI_V4
)
33395 if (rs6000_passes_vector
)
33396 fprintf (asm_out_file
, "\t.gnu_attribute 8, %d\n",
33397 (TARGET_ALTIVEC_ABI
? 2 : 1));
33398 if (rs6000_returns_struct
)
33399 fprintf (asm_out_file
, "\t.gnu_attribute 12, %d\n",
33400 aix_struct_return
? 2 : 1);
33403 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33404 if (TARGET_32BIT
|| DEFAULT_ABI
== ABI_ELFv2
)
33405 file_end_indicate_exec_stack ();
33408 if (flag_split_stack
)
33409 file_end_indicate_split_stack ();
33413 /* We have expanded a CPU builtin, so we need to emit a reference to
33414 the special symbol that LIBC uses to declare it supports the
33415 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33416 switch_to_section (data_section
);
33417 fprintf (asm_out_file
, "\t.align %u\n", TARGET_32BIT
? 2 : 3);
33418 fprintf (asm_out_file
, "\t%s %s\n",
33419 TARGET_32BIT
? ".long" : ".quad", tcb_verification_symbol
);
33426 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33427 #define HAVE_XCOFF_DWARF_EXTRAS 0
33430 static enum unwind_info_type
33431 rs6000_xcoff_debug_unwind_info (void)
33437 rs6000_xcoff_asm_output_anchor (rtx symbol
)
33441 sprintf (buffer
, "$ + " HOST_WIDE_INT_PRINT_DEC
,
33442 SYMBOL_REF_BLOCK_OFFSET (symbol
));
33443 fprintf (asm_out_file
, "%s", SET_ASM_OP
);
33444 RS6000_OUTPUT_BASENAME (asm_out_file
, XSTR (symbol
, 0));
33445 fprintf (asm_out_file
, ",");
33446 RS6000_OUTPUT_BASENAME (asm_out_file
, buffer
);
33447 fprintf (asm_out_file
, "\n");
static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}

/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}
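
/* For instance, a read-only csect named "foo" is emitted as
     .csect foo[RO],<align>
   where <align> stands for XCOFF_CSECT_DEFAULT_ALIGNMENT_STR; its exact
   value is defined in the XCOFF target headers and is not shown here.  */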
33487 /* A get_unnamed_section callback, used for switching to toc_section. */
33490 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
33492 if (TARGET_MINIMAL_TOC
)
33494 /* toc_section is always selected at least once from
33495 rs6000_xcoff_file_start, so this is guaranteed to
33496 always be defined once and only once in each file. */
33497 if (!toc_initialized
)
33499 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file
);
33500 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file
);
33501 toc_initialized
= 1;
33503 fprintf (asm_out_file
, "\t.csect toc_table[RW]%s\n",
33504 (TARGET_32BIT
? "" : ",3"));
33507 fputs ("\t.toc\n", asm_out_file
);
33510 /* Implement TARGET_ASM_INIT_SECTIONS. */
33513 rs6000_xcoff_asm_init_sections (void)
33515 read_only_data_section
33516 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33517 &xcoff_read_only_section_name
);
33519 private_data_section
33520 = get_unnamed_section (SECTION_WRITE
,
33521 rs6000_xcoff_output_readwrite_section_asm_op
,
33522 &xcoff_private_data_section_name
);
33525 = get_unnamed_section (SECTION_TLS
,
33526 rs6000_xcoff_output_tls_section_asm_op
,
33527 &xcoff_tls_data_section_name
);
33529 tls_private_data_section
33530 = get_unnamed_section (SECTION_TLS
,
33531 rs6000_xcoff_output_tls_section_asm_op
,
33532 &xcoff_private_data_section_name
);
33534 read_only_private_data_section
33535 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
33536 &xcoff_private_data_section_name
);
33539 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op
, NULL
);
33541 readonly_data_section
= read_only_data_section
;
33545 rs6000_xcoff_reloc_rw_mask (void)
33551 rs6000_xcoff_asm_named_section (const char *name
, unsigned int flags
,
33552 tree decl ATTRIBUTE_UNUSED
)
33555 static const char * const suffix
[5] = { "PR", "RO", "RW", "TL", "XO" };
33557 if (flags
& SECTION_EXCLUDE
)
33559 else if (flags
& SECTION_DEBUG
)
33561 fprintf (asm_out_file
, "\t.dwsect %s\n", name
);
33564 else if (flags
& SECTION_CODE
)
33566 else if (flags
& SECTION_TLS
)
33568 else if (flags
& SECTION_WRITE
)
33573 fprintf (asm_out_file
, "\t.csect %s%s[%s],%u\n",
33574 (flags
& SECTION_CODE
) ? "." : "",
33575 name
, suffix
[smclass
], flags
& SECTION_ENTSIZE
);
33578 #define IN_NAMED_SECTION(DECL) \
33579 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33580 && DECL_SECTION_NAME (DECL) != NULL)
33583 rs6000_xcoff_select_section (tree decl
, int reloc
,
33584 unsigned HOST_WIDE_INT align
)
33586 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33588 if (align
> BIGGEST_ALIGNMENT
)
33590 resolve_unique_section (decl
, reloc
, true);
33591 if (IN_NAMED_SECTION (decl
))
33592 return get_named_section (decl
, NULL
, reloc
);
33595 if (decl_readonly_section (decl
, reloc
))
33597 if (TREE_PUBLIC (decl
))
33598 return read_only_data_section
;
33600 return read_only_private_data_section
;
33605 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
33607 if (TREE_PUBLIC (decl
))
33608 return tls_data_section
;
33609 else if (bss_initializer_p (decl
))
33611 /* Convert to COMMON to emit in BSS. */
33612 DECL_COMMON (decl
) = 1;
33613 return tls_comm_section
;
33616 return tls_private_data_section
;
33620 if (TREE_PUBLIC (decl
))
33621 return data_section
;
33623 return private_data_section
;
33628 rs6000_xcoff_unique_section (tree decl
, int reloc ATTRIBUTE_UNUSED
)
33632 /* Use select_section for private data and uninitialized data with
33633 alignment <= BIGGEST_ALIGNMENT. */
33634 if (!TREE_PUBLIC (decl
)
33635 || DECL_COMMON (decl
)
33636 || (DECL_INITIAL (decl
) == NULL_TREE
33637 && DECL_ALIGN (decl
) <= BIGGEST_ALIGNMENT
)
33638 || DECL_INITIAL (decl
) == error_mark_node
33639 || (flag_zero_initialized_in_bss
33640 && initializer_zerop (DECL_INITIAL (decl
))))
33643 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
33644 name
= (*targetm
.strip_name_encoding
) (name
);
33645 set_decl_section_name (decl
, name
);
33648 /* Select section for constant in constant pool.
33650 On RS/6000, all constants are in the private read-only data area.
33651 However, if this is being placed in the TOC it must be output as a
33655 rs6000_xcoff_select_rtx_section (machine_mode mode
, rtx x
,
33656 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED
)
33658 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
33659 return toc_section
;
33661 return read_only_private_data_section
;
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;

  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
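
/* Example: a symbol recorded as "foo[DS]" comes back as "foo"; names
   without a trailing mapping-class suffix are returned unchanged.  The code
   simply drops the final four characters whenever the name ends in ']',
   matching the four-character "[DS]"/"[UA]" suffixes appended by
   rs6000_xcoff_encode_section_info later in this file.  */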
33679 /* Section attributes. AIX is always PIC. */
33681 static unsigned int
33682 rs6000_xcoff_section_type_flags (tree decl
, const char *name
, int reloc
)
33684 unsigned int align
;
33685 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
33687 /* Align to at least UNIT size. */
33688 if ((flags
& SECTION_CODE
) != 0 || !decl
|| !DECL_P (decl
))
33689 align
= MIN_UNITS_PER_WORD
;
33691 /* Increase alignment of large objects if not already stricter. */
33692 align
= MAX ((DECL_ALIGN (decl
) / BITS_PER_UNIT
),
33693 int_size_in_bytes (TREE_TYPE (decl
)) > MIN_UNITS_PER_WORD
33694 ? UNITS_PER_FP_WORD
: MIN_UNITS_PER_WORD
);
33696 return flags
| (exact_log2 (align
) & SECTION_ENTSIZE
);
33699 /* Output at beginning of assembler file.
33701 Initialize the section names for the RS/6000 at this point.
33703 Specify filename, including full path, to assembler.
33705 We want to go into the TOC section so at least one .toc will be emitted.
33706 Also, in order to output proper .bs/.es pairs, we need at least one static
33707 [RW] section emitted.
33709 Finally, declare mcount when profiling to make the assembler happy. */
33712 rs6000_xcoff_file_start (void)
33714 rs6000_gen_section_name (&xcoff_bss_section_name
,
33715 main_input_filename
, ".bss_");
33716 rs6000_gen_section_name (&xcoff_private_data_section_name
,
33717 main_input_filename
, ".rw_");
33718 rs6000_gen_section_name (&xcoff_read_only_section_name
,
33719 main_input_filename
, ".ro_");
33720 rs6000_gen_section_name (&xcoff_tls_data_section_name
,
33721 main_input_filename
, ".tls_");
33722 rs6000_gen_section_name (&xcoff_tbss_section_name
,
33723 main_input_filename
, ".tbss_[UL]");
33725 fputs ("\t.file\t", asm_out_file
);
33726 output_quoted_string (asm_out_file
, main_input_filename
);
33727 fputc ('\n', asm_out_file
);
33728 if (write_symbols
!= NO_DEBUG
)
33729 switch_to_section (private_data_section
);
33730 switch_to_section (toc_section
);
33731 switch_to_section (text_section
);
33733 fprintf (asm_out_file
, "\t.extern %s\n", RS6000_MCOUNT
);
33734 rs6000_file_start ();
33737 /* Output at end of assembler file.
33738 On the RS/6000, referencing data should automatically pull in text. */
33741 rs6000_xcoff_file_end (void)
33743 switch_to_section (text_section
);
33744 fputs ("_section_.text:\n", asm_out_file
);
33745 switch_to_section (data_section
);
33746 fputs (TARGET_32BIT
33747 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33751 struct declare_alias_data
33754 bool function_descriptor
;
33757 /* Declare alias N. A helper function for for_node_and_aliases. */
33760 rs6000_declare_alias (struct symtab_node
*n
, void *d
)
33762 struct declare_alias_data
*data
= (struct declare_alias_data
*)d
;
33763 /* Main symbol is output specially, because varasm machinery does part of
33764 the job for us - we do not need to declare .globl/lglobs and such. */
33765 if (!n
->alias
|| n
->weakref
)
33768 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n
->decl
)))
33771 /* Prevent assemble_alias from trying to use .set pseudo operation
33772 that does not behave as expected by the middle-end. */
33773 TREE_ASM_WRITTEN (n
->decl
) = true;
33775 const char *name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n
->decl
));
33776 char *buffer
= (char *) alloca (strlen (name
) + 2);
33778 int dollar_inside
= 0;
33780 strcpy (buffer
, name
);
33781 p
= strchr (buffer
, '$');
33785 p
= strchr (p
+ 1, '$');
33787 if (TREE_PUBLIC (n
->decl
))
33789 if (!RS6000_WEAK
|| !DECL_WEAK (n
->decl
))
33791 if (dollar_inside
) {
33792 if (data
->function_descriptor
)
33793 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33794 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33796 if (data
->function_descriptor
)
33798 fputs ("\t.globl .", data
->file
);
33799 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33800 putc ('\n', data
->file
);
33802 fputs ("\t.globl ", data
->file
);
33803 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33804 putc ('\n', data
->file
);
33806 #ifdef ASM_WEAKEN_DECL
33807 else if (DECL_WEAK (n
->decl
) && !data
->function_descriptor
)
33808 ASM_WEAKEN_DECL (data
->file
, n
->decl
, name
, NULL
);
33815 if (data
->function_descriptor
)
33816 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33817 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33819 if (data
->function_descriptor
)
33821 fputs ("\t.lglobl .", data
->file
);
33822 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33823 putc ('\n', data
->file
);
33825 fputs ("\t.lglobl ", data
->file
);
33826 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33827 putc ('\n', data
->file
);
33829 if (data
->function_descriptor
)
33830 fputs (".", data
->file
);
33831 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
33832 fputs (":\n", data
->file
);
33837 #ifdef HAVE_GAS_HIDDEN
33838 /* Helper function to calculate visibility of a DECL
33839 and return the value as a const string. */
33841 static const char *
33842 rs6000_xcoff_visibility (tree decl
)
33844 static const char * const visibility_types
[] = {
33845 "", ",protected", ",hidden", ",internal"
33848 enum symbol_visibility vis
= DECL_VISIBILITY (decl
);
33850 if (TREE_CODE (decl
) == FUNCTION_DECL
33851 && cgraph_node::get (decl
)
33852 && cgraph_node::get (decl
)->instrumentation_clone
33853 && cgraph_node::get (decl
)->instrumented_version
)
33854 vis
= DECL_VISIBILITY (cgraph_node::get (decl
)->instrumented_version
->decl
);
33856 return visibility_types
[vis
];
33861 /* This macro produces the initial definition of a function name.
33862 On the RS/6000, we need to place an extra '.' in the function name and
33863 output the function descriptor.
33864 Dollar signs are converted to underscores.
33866 The csect for the function will have already been created when
33867 text_section was selected. We do have to go back to that csect, however.
33869 The third and fourth parameters to the .function pseudo-op (16 and 044)
33870 are placeholders which no longer have any use.
33872 Because AIX assembler's .set command has unexpected semantics, we output
33873 all aliases as alternative labels in front of the definition. */
33876 rs6000_xcoff_declare_function_name (FILE *file
, const char *name
, tree decl
)
33878 char *buffer
= (char *) alloca (strlen (name
) + 1);
33880 int dollar_inside
= 0;
33881 struct declare_alias_data data
= {file
, false};
33883 strcpy (buffer
, name
);
33884 p
= strchr (buffer
, '$');
33888 p
= strchr (p
+ 1, '$');
33890 if (TREE_PUBLIC (decl
))
33892 if (!RS6000_WEAK
|| !DECL_WEAK (decl
))
33894 if (dollar_inside
) {
33895 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33896 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33898 fputs ("\t.globl .", file
);
33899 RS6000_OUTPUT_BASENAME (file
, buffer
);
33900 #ifdef HAVE_GAS_HIDDEN
33901 fputs (rs6000_xcoff_visibility (decl
), file
);
33908 if (dollar_inside
) {
33909 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
33910 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
33912 fputs ("\t.lglobl .", file
);
33913 RS6000_OUTPUT_BASENAME (file
, buffer
);
33916 fputs ("\t.csect ", file
);
33917 RS6000_OUTPUT_BASENAME (file
, buffer
);
33918 fputs (TARGET_32BIT
? "[DS]\n" : "[DS],3\n", file
);
33919 RS6000_OUTPUT_BASENAME (file
, buffer
);
33920 fputs (":\n", file
);
33921 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
33923 fputs (TARGET_32BIT
? "\t.long ." : "\t.llong .", file
);
33924 RS6000_OUTPUT_BASENAME (file
, buffer
);
33925 fputs (", TOC[tc0], 0\n", file
);
33927 switch_to_section (function_section (decl
));
33929 RS6000_OUTPUT_BASENAME (file
, buffer
);
33930 fputs (":\n", file
);
33931 data
.function_descriptor
= true;
33932 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
33934 if (!DECL_IGNORED_P (decl
))
33936 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
33937 xcoffout_declare_function (file
, decl
, buffer
);
33938 else if (write_symbols
== DWARF2_DEBUG
)
33940 name
= (*targetm
.strip_name_encoding
) (name
);
33941 fprintf (file
, "\t.function .%s,.%s,2,0\n", name
, name
);
33948 /* Output assembly language to globalize a symbol from a DECL,
33949 possibly with visibility. */
33952 rs6000_xcoff_asm_globalize_decl_name (FILE *stream
, tree decl
)
33954 const char *name
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
33955 fputs (GLOBAL_ASM_OP
, stream
);
33956 RS6000_OUTPUT_BASENAME (stream
, name
);
33957 #ifdef HAVE_GAS_HIDDEN
33958 fputs (rs6000_xcoff_visibility (decl
), stream
);
33960 putc ('\n', stream
);
33963 /* Output assembly language to define a symbol as COMMON from a DECL,
33964 possibly with visibility. */
33967 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream
,
33968 tree decl ATTRIBUTE_UNUSED
,
33970 unsigned HOST_WIDE_INT size
,
33971 unsigned HOST_WIDE_INT align
)
33973 unsigned HOST_WIDE_INT align2
= 2;
33976 align2
= floor_log2 (align
/ BITS_PER_UNIT
);
33980 fputs (COMMON_ASM_OP
, stream
);
33981 RS6000_OUTPUT_BASENAME (stream
, name
);
33984 "," HOST_WIDE_INT_PRINT_UNSIGNED
"," HOST_WIDE_INT_PRINT_UNSIGNED
,
33987 #ifdef HAVE_GAS_HIDDEN
33988 fputs (rs6000_xcoff_visibility (decl
), stream
);
33990 putc ('\n', stream
);
33993 /* This macro produces the initial definition of a object (variable) name.
33994 Because AIX assembler's .set command has unexpected semantics, we output
33995 all aliases as alternative labels in front of the definition. */
33998 rs6000_xcoff_declare_object_name (FILE *file
, const char *name
, tree decl
)
34000 struct declare_alias_data data
= {file
, false};
34001 RS6000_OUTPUT_BASENAME (file
, name
);
34002 fputs (":\n", file
);
34003 symtab_node::get_create (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
,
/* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'.  */

static void
rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-$", file);
}

/* Output a symbol offset relative to the dbase for the current object.
   We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
   signed offsets.

   __gcc_unwind_dbase is embedded in all executables/libraries through
   libgcc/config/rs6000/crtdbase.S.  */

static void
rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-__gcc_unwind_dbase", file);
}
34034 rs6000_xcoff_encode_section_info (tree decl
, rtx rtl
, int first
)
34038 const char *symname
;
34040 default_encode_section_info (decl
, rtl
, first
);
34042 /* Careful not to prod global register variables. */
34045 symbol
= XEXP (rtl
, 0);
34046 if (GET_CODE (symbol
) != SYMBOL_REF
)
34049 flags
= SYMBOL_REF_FLAGS (symbol
);
34051 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
34052 flags
&= ~SYMBOL_FLAG_HAS_BLOCK_INFO
;
34054 SYMBOL_REF_FLAGS (symbol
) = flags
;
34056 /* Append mapping class to extern decls. */
34057 symname
= XSTR (symbol
, 0);
34058 if (decl
/* sync condition with assemble_external () */
34059 && DECL_P (decl
) && DECL_EXTERNAL (decl
) && TREE_PUBLIC (decl
)
34060 && ((TREE_CODE (decl
) == VAR_DECL
&& !DECL_THREAD_LOCAL_P (decl
))
34061 || TREE_CODE (decl
) == FUNCTION_DECL
)
34062 && symname
[strlen (symname
) - 1] != ']')
34064 char *newname
= (char *) alloca (strlen (symname
) + 5);
34065 strcpy (newname
, symname
);
34066 strcat (newname
, (TREE_CODE (decl
) == FUNCTION_DECL
34067 ? "[DS]" : "[UA]"));
34068 XSTR (symbol
, 0) = ggc_strdup (newname
);
34071 #endif /* HAVE_AS_TLS */
34072 #endif /* TARGET_XCOFF */
void
rs6000_asm_weaken_decl (FILE *stream, tree decl,
			const char *name, const char *val)
{
  fputs ("\t.weak\t", stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  if (decl && TREE_CODE (decl) == FUNCTION_DECL
      && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
    {
      if (TARGET_XCOFF)
	fputs ("[DS]", stream);
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
      if (TARGET_XCOFF)
	fputs (rs6000_xcoff_visibility (decl), stream);
#endif
      fputs ("\n\t.weak\t.", stream);
      RS6000_OUTPUT_BASENAME (stream, name);
    }
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
  if (TARGET_XCOFF)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  fputc ('\n', stream);
  if (val)
    {
#ifdef ASM_OUTPUT_DEF
      ASM_OUTPUT_DEF (stream, name, val);
#endif
      if (decl && TREE_CODE (decl) == FUNCTION_DECL
	  && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
	{
	  fputs ("\t.set\t.", stream);
	  RS6000_OUTPUT_BASENAME (stream, name);
	  fputs (",.", stream);
	  RS6000_OUTPUT_BASENAME (stream, val);
	  fputc ('\n', stream);
	}
    }
}
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
	 && get_attr_cannot_copy (insn);
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
		  int opno ATTRIBUTE_UNUSED, int *total, bool speed)
{
  int code = GET_CODE (x);
34136 /* On the RS/6000, if it is valid in the insn, it is free. */
34138 if (((outer_code
== SET
34139 || outer_code
== PLUS
34140 || outer_code
== MINUS
)
34141 && (satisfies_constraint_I (x
)
34142 || satisfies_constraint_L (x
)))
34143 || (outer_code
== AND
34144 && (satisfies_constraint_K (x
)
34146 ? satisfies_constraint_L (x
)
34147 : satisfies_constraint_J (x
))))
34148 || ((outer_code
== IOR
|| outer_code
== XOR
)
34149 && (satisfies_constraint_K (x
)
34151 ? satisfies_constraint_L (x
)
34152 : satisfies_constraint_J (x
))))
34153 || outer_code
== ASHIFT
34154 || outer_code
== ASHIFTRT
34155 || outer_code
== LSHIFTRT
34156 || outer_code
== ROTATE
34157 || outer_code
== ROTATERT
34158 || outer_code
== ZERO_EXTRACT
34159 || (outer_code
== MULT
34160 && satisfies_constraint_I (x
))
34161 || ((outer_code
== DIV
|| outer_code
== UDIV
34162 || outer_code
== MOD
|| outer_code
== UMOD
)
34163 && exact_log2 (INTVAL (x
)) >= 0)
34164 || (outer_code
== COMPARE
34165 && (satisfies_constraint_I (x
)
34166 || satisfies_constraint_K (x
)))
34167 || ((outer_code
== EQ
|| outer_code
== NE
)
34168 && (satisfies_constraint_I (x
)
34169 || satisfies_constraint_K (x
)
34171 ? satisfies_constraint_L (x
)
34172 : satisfies_constraint_J (x
))))
34173 || (outer_code
== GTU
34174 && satisfies_constraint_I (x
))
34175 || (outer_code
== LTU
34176 && satisfies_constraint_P (x
)))
34181 else if ((outer_code
== PLUS
34182 && reg_or_add_cint_operand (x
, VOIDmode
))
34183 || (outer_code
== MINUS
34184 && reg_or_sub_cint_operand (x
, VOIDmode
))
34185 || ((outer_code
== SET
34186 || outer_code
== IOR
34187 || outer_code
== XOR
)
34189 & ~ (unsigned HOST_WIDE_INT
) 0xffffffff) == 0))
34191 *total
= COSTS_N_INSNS (1);
34197 case CONST_WIDE_INT
:
34201 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34205 /* When optimizing for size, MEM should be slightly more expensive
34206 than generating address, e.g., (plus (reg) (const)).
34207 L1 cache latency is about two instructions. */
34208 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34209 if (SLOW_UNALIGNED_ACCESS (mode
, MEM_ALIGN (x
)))
34210 *total
+= COSTS_N_INSNS (100);
34219 if (FLOAT_MODE_P (mode
))
34220 *total
= rs6000_cost
->fp
;
34222 *total
= COSTS_N_INSNS (1);
34226 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34227 && satisfies_constraint_I (XEXP (x
, 1)))
34229 if (INTVAL (XEXP (x
, 1)) >= -256
34230 && INTVAL (XEXP (x
, 1)) <= 255)
34231 *total
= rs6000_cost
->mulsi_const9
;
34233 *total
= rs6000_cost
->mulsi_const
;
34235 else if (mode
== SFmode
)
34236 *total
= rs6000_cost
->fp
;
34237 else if (FLOAT_MODE_P (mode
))
34238 *total
= rs6000_cost
->dmul
;
34239 else if (mode
== DImode
)
34240 *total
= rs6000_cost
->muldi
;
34242 *total
= rs6000_cost
->mulsi
;
34246 if (mode
== SFmode
)
34247 *total
= rs6000_cost
->fp
;
34249 *total
= rs6000_cost
->dmul
;
34254 if (FLOAT_MODE_P (mode
))
34256 *total
= mode
== DFmode
? rs6000_cost
->ddiv
34257 : rs6000_cost
->sdiv
;
34264 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
34265 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
34267 if (code
== DIV
|| code
== MOD
)
34269 *total
= COSTS_N_INSNS (2);
34272 *total
= COSTS_N_INSNS (1);
34276 if (GET_MODE (XEXP (x
, 1)) == DImode
)
34277 *total
= rs6000_cost
->divdi
;
34279 *total
= rs6000_cost
->divsi
;
34281 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34282 if (!TARGET_MODULO
&& (code
== MOD
|| code
== UMOD
))
34283 *total
+= COSTS_N_INSNS (2);
34287 *total
= COSTS_N_INSNS (TARGET_CTZ
? 1 : 4);
34291 *total
= COSTS_N_INSNS (4);
34295 *total
= COSTS_N_INSNS (TARGET_POPCNTD
? 1 : 6);
34299 *total
= COSTS_N_INSNS (TARGET_CMPB
? 2 : 6);
34303 if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
34306 *total
= COSTS_N_INSNS (1);
34310 if (CONST_INT_P (XEXP (x
, 1)))
34312 rtx left
= XEXP (x
, 0);
34313 rtx_code left_code
= GET_CODE (left
);
34315 /* rotate-and-mask: 1 insn. */
34316 if ((left_code
== ROTATE
34317 || left_code
== ASHIFT
34318 || left_code
== LSHIFTRT
)
34319 && rs6000_is_valid_shift_mask (XEXP (x
, 1), left
, mode
))
34321 *total
= rtx_cost (XEXP (left
, 0), mode
, left_code
, 0, speed
);
34322 if (!CONST_INT_P (XEXP (left
, 1)))
34323 *total
+= rtx_cost (XEXP (left
, 1), SImode
, left_code
, 1, speed
);
34324 *total
+= COSTS_N_INSNS (1);
34328 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34329 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
34330 if (rs6000_is_valid_and_mask (XEXP (x
, 1), mode
)
34331 || (val
& 0xffff) == val
34332 || (val
& 0xffff0000) == val
34333 || ((val
& 0xffff) == 0 && mode
== SImode
))
34335 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34336 *total
+= COSTS_N_INSNS (1);
34341 if (rs6000_is_valid_2insn_and (XEXP (x
, 1), mode
))
34343 *total
= rtx_cost (left
, mode
, AND
, 0, speed
);
34344 *total
+= COSTS_N_INSNS (2);
34349 *total
= COSTS_N_INSNS (1);
34354 *total
= COSTS_N_INSNS (1);
34360 *total
= COSTS_N_INSNS (1);
34364 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34365 the sign extend and shift separately within the insn. */
34366 if (TARGET_EXTSWSLI
&& mode
== DImode
34367 && GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
34368 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == SImode
)
34379 /* Handle mul_highpart. */
34380 if (outer_code
== TRUNCATE
34381 && GET_CODE (XEXP (x
, 0)) == MULT
)
34383 if (mode
== DImode
)
34384 *total
= rs6000_cost
->muldi
;
34386 *total
= rs6000_cost
->mulsi
;
34389 else if (outer_code
== AND
)
34392 *total
= COSTS_N_INSNS (1);
34397 if (GET_CODE (XEXP (x
, 0)) == MEM
)
34400 *total
= COSTS_N_INSNS (1);
34406 if (!FLOAT_MODE_P (mode
))
34408 *total
= COSTS_N_INSNS (1);
34414 case UNSIGNED_FLOAT
:
34417 case FLOAT_TRUNCATE
:
34418 *total
= rs6000_cost
->fp
;
34422 if (mode
== DFmode
)
34423 *total
= rs6000_cost
->sfdf_convert
;
34425 *total
= rs6000_cost
->fp
;
34429 switch (XINT (x
, 1))
34432 *total
= rs6000_cost
->fp
;
34444 *total
= COSTS_N_INSNS (1);
34447 else if (FLOAT_MODE_P (mode
) && TARGET_PPC_GFXOPT
&& TARGET_HARD_FLOAT
)
34449 *total
= rs6000_cost
->fp
;
34458 /* Carry bit requires mode == Pmode.
34459 NEG or PLUS already counted so only add one. */
34461 && (outer_code
== NEG
|| outer_code
== PLUS
))
34463 *total
= COSTS_N_INSNS (1);
34466 if (outer_code
== SET
)
34468 if (XEXP (x
, 1) == const0_rtx
)
34470 if (TARGET_ISEL
&& !TARGET_MFCRF
)
34471 *total
= COSTS_N_INSNS (8);
34473 *total
= COSTS_N_INSNS (2);
34478 *total
= COSTS_N_INSNS (3);
34487 if (outer_code
== SET
&& (XEXP (x
, 1) == const0_rtx
))
34489 if (TARGET_ISEL
&& !TARGET_MFCRF
)
34490 *total
= COSTS_N_INSNS (8);
34492 *total
= COSTS_N_INSNS (2);
34496 if (outer_code
== COMPARE
)
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
			int opno, int *total, bool speed)
{
  bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);

  fprintf (stderr,
	   "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
	   "opno = %d, total = %d, speed = %s, x:\n",
	   ret ? "complete" : "scan inner",
	   GET_MODE_NAME (mode),
	   GET_RTX_NAME (outer_code),
	   opno,
	   *total,
	   speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, machine_mode mode,
			   addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
		|| rs6000_cpu == PROCESSOR_POWER7
		|| rs6000_cpu == PROCESSOR_POWER8
		|| rs6000_cpu == PROCESSOR_POWER9)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs[0][mode];

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
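/* A rough illustration (hypothetical sketch, not part of the build) of the
   GPR branch above: the base cost of one register-to-register copy is 2,
   charged once per general register the mode occupies.  mode_size_bytes and
   gpr_size_bytes are stand-in parameters for this sketch only.

     static int
     gpr_move_cost_sketch (int mode_size_bytes, int gpr_size_bytes)
     {
       int nregs = (mode_size_bytes + gpr_size_bytes - 1) / gpr_size_bytes;
       return 2 * nregs;   // mirrors "ret = 2 * hard_regno_nregs[0][mode]"
     }

   e.g. a 16-byte mode with 8-byte GPRs costs 2 * 2 = 4, while classes that
   must bounce through memory are charged the two memory_move costs.  */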
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
	    || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (tree fndecl)
{
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case VSX_BUILTIN_XVSQRTDP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

    case VSX_BUILTIN_XVSQRTSP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

    default:
      return NULL_TREE;
    }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = const_double_from_real_value (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = const_double_from_real_value (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = const_double_from_real_value (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (dst, r));
}
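/* A small worked identity (illustrative only) behind the expansion above:
   the target FNMSUB computes -((m1 * m2) - a), while fnma_optab models
   fma(-m1, m2, a) = -(m1 * m2) + a.  The two agree for all finite values
   and differ only in the sign of zero results, e.g. with m1 = m2 = a = 1.0:

       -fma (m1, m2, -a) = -((1 * 1) + (-1)) = -0.0
        fma (-m1, m2, a) = ((-1) * 1) + 1    = +0.0

   which is why the pattern is built explicitly as NEG (FMA (m1, m2, NEG a))
   rather than through fnma_optab.  */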
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
  int i;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					      UNSPEC_FRES)));

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */
  if (passes > 1) {

    /* e0 = 1. - d * x0  */
    e0 = gen_reg_rtx (mode);
    rs6000_emit_nmsub (e0, d, x0, one);

    /* x1 = x0 + e0 * x0  */
    x1 = gen_reg_rtx (mode);
    rs6000_emit_madd (x1, e0, x0, x0);

    for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
	 ++i, xprev = xnext, eprev = enext) {

      /* enext = eprev * eprev  */
      enext = gen_reg_rtx (mode);
      emit_insn (gen_mul (enext, eprev, eprev));

      /* xnext = xprev + enext * xprev  */
      xnext = gen_reg_rtx (mode);
      rs6000_emit_madd (xnext, enext, xprev, xprev);
    }

  } else
    xprev = x0;

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev  */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u)  */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u  */
  rs6000_emit_madd (dst, v, xprev, u);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
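/* A hypothetical standalone sketch (not part of the build) of the sequence
   the expander above emits, written for scalar double with one reciprocal
   refinement (the passes == 2 shape).  recip_estimate() is a stand-in name
   for the hardware fre/fres estimate instruction.

     static double
     swdiv_sketch (double n, double d)
     {
       double x0 = recip_estimate (d);   // x0 ~= 1/d
       double e0 = 1.0 - d * x0;         // e0 = 1 - d*x0       (fnmsub)
       double x1 = x0 + e0 * x0;         // x1 = x0*(2 - d*x0)  (fmadd)
       double u  = n * x1;               // u = n*x1
       double v  = n - d * u;            // v = n - d*u         (fnmsub)
       return v * x1 + u;                // n/d ~= v*x1 + u     (fmadd)
     }
 */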
34826 /* Goldschmidt's Algorithm for single/double-precision floating point
34827 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34830 rs6000_emit_swsqrt (rtx dst
, rtx src
, bool recip
)
34832 machine_mode mode
= GET_MODE (src
);
34833 rtx e
= gen_reg_rtx (mode
);
34834 rtx g
= gen_reg_rtx (mode
);
34835 rtx h
= gen_reg_rtx (mode
);
34837 /* Low precision estimates guarantee 5 bits of accuracy. High
34838 precision estimates guarantee 14 bits of accuracy. SFmode
34839 requires 23 bits of accuracy. DFmode requires 52 bits of
34840 accuracy. Each pass at least doubles the accuracy, leading
34841 to the following. */
34842 int passes
= (TARGET_RECIP_PRECISION
) ? 1 : 3;
34843 if (mode
== DFmode
|| mode
== V2DFmode
)
34848 enum insn_code code
= optab_handler (smul_optab
, mode
);
34849 insn_gen_fn gen_mul
= GEN_FCN (code
);
34851 gcc_assert (code
!= CODE_FOR_nothing
);
34853 mhalf
= rs6000_load_constant_and_splat (mode
, dconsthalf
);
34855 /* e = rsqrt estimate */
34856 emit_insn (gen_rtx_SET (e
, gen_rtx_UNSPEC (mode
, gen_rtvec (1, src
),
34859 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34862 rtx zero
= force_reg (mode
, CONST0_RTX (mode
));
34864 if (mode
== SFmode
)
34866 rtx target
= emit_conditional_move (e
, GT
, src
, zero
, mode
,
34869 emit_move_insn (e
, target
);
34873 rtx cond
= gen_rtx_GT (VOIDmode
, e
, zero
);
34874 rs6000_emit_vector_cond_expr (e
, e
, zero
, cond
, src
, zero
);
34878 /* g = sqrt estimate. */
34879 emit_insn (gen_mul (g
, e
, src
));
34880 /* h = 1/(2*sqrt) estimate. */
34881 emit_insn (gen_mul (h
, e
, mhalf
));
34887 rtx t
= gen_reg_rtx (mode
);
34888 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
34889 /* Apply correction directly to 1/rsqrt estimate. */
34890 rs6000_emit_madd (dst
, e
, t
, e
);
34894 for (i
= 0; i
< passes
; i
++)
34896 rtx t1
= gen_reg_rtx (mode
);
34897 rtx g1
= gen_reg_rtx (mode
);
34898 rtx h1
= gen_reg_rtx (mode
);
34900 rs6000_emit_nmsub (t1
, g
, h
, mhalf
);
34901 rs6000_emit_madd (g1
, g
, t1
, g
);
34902 rs6000_emit_madd (h1
, h
, t1
, h
);
34907 /* Multiply by 2 for 1/rsqrt. */
34908 emit_insn (gen_add3_insn (dst
, h
, h
));
34913 rtx t
= gen_reg_rtx (mode
);
34914 rs6000_emit_nmsub (t
, g
, h
, mhalf
);
34915 rs6000_emit_madd (dst
, g
, t
, g
);
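/* A hypothetical scalar sketch (not part of the build) of one Goldschmidt
   step as emitted above, with rsqrt_estimate() standing in for the hardware
   frsqrte estimate:

     static double
     swsqrt_sketch (double src)
     {
       double e = rsqrt_estimate (src);  // e ~= 1/sqrt(src)
       double g = e * src;               // g ~= sqrt(src)
       double h = e * 0.5;               // h ~= 1/(2*sqrt(src))
       double t = 0.5 - g * h;           // correction (fnmsub against mhalf)
       g = g + g * t;                    // refined sqrt estimate
       h = h + h * t;                    // refined 1/(2*sqrt) estimate
       return g;                         // for rsqrt, return 2*h instead
     }
 */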
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
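/* A hypothetical sketch (not part of the build) of the popcntb fallback
   above for the 32-bit case: popcntb leaves a per-byte population count in
   each byte; multiplying by 0x01010101 sums those counts into the top byte,
   and the shift by 24 extracts the total.

     static unsigned int
     popcount_sketch (unsigned int per_byte_counts)
     {
       // per_byte_counts is what popcntb produces: byte i holds the
       // population count of byte i of the original operand (0..8).
       unsigned int sums = per_byte_counts * 0x01010101u;
       return sums >> 24;   // total popcount of the original word
     }
 */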
34963 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34964 target, and SRC is the argument operand. */
34967 rs6000_emit_parity (rtx dst
, rtx src
)
34969 machine_mode mode
= GET_MODE (dst
);
34972 tmp
= gen_reg_rtx (mode
);
34974 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34977 if (mode
== SImode
)
34979 emit_insn (gen_popcntbsi2 (tmp
, src
));
34980 emit_insn (gen_paritysi2_cmpb (dst
, tmp
));
34984 emit_insn (gen_popcntbdi2 (tmp
, src
));
34985 emit_insn (gen_paritydi2_cmpb (dst
, tmp
));
34990 if (mode
== SImode
)
34992 /* Is mult+shift >= shift+xor+shift+xor? */
34993 if (rs6000_cost
->mulsi_const
>= COSTS_N_INSNS (3))
34995 rtx tmp1
, tmp2
, tmp3
, tmp4
;
34997 tmp1
= gen_reg_rtx (SImode
);
34998 emit_insn (gen_popcntbsi2 (tmp1
, src
));
35000 tmp2
= gen_reg_rtx (SImode
);
35001 emit_insn (gen_lshrsi3 (tmp2
, tmp1
, GEN_INT (16)));
35002 tmp3
= gen_reg_rtx (SImode
);
35003 emit_insn (gen_xorsi3 (tmp3
, tmp1
, tmp2
));
35005 tmp4
= gen_reg_rtx (SImode
);
35006 emit_insn (gen_lshrsi3 (tmp4
, tmp3
, GEN_INT (8)));
35007 emit_insn (gen_xorsi3 (tmp
, tmp3
, tmp4
));
35010 rs6000_emit_popcount (tmp
, src
);
35011 emit_insn (gen_andsi3 (dst
, tmp
, const1_rtx
));
35015 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35016 if (rs6000_cost
->muldi
>= COSTS_N_INSNS (5))
35018 rtx tmp1
, tmp2
, tmp3
, tmp4
, tmp5
, tmp6
;
35020 tmp1
= gen_reg_rtx (DImode
);
35021 emit_insn (gen_popcntbdi2 (tmp1
, src
));
35023 tmp2
= gen_reg_rtx (DImode
);
35024 emit_insn (gen_lshrdi3 (tmp2
, tmp1
, GEN_INT (32)));
35025 tmp3
= gen_reg_rtx (DImode
);
35026 emit_insn (gen_xordi3 (tmp3
, tmp1
, tmp2
));
35028 tmp4
= gen_reg_rtx (DImode
);
35029 emit_insn (gen_lshrdi3 (tmp4
, tmp3
, GEN_INT (16)));
35030 tmp5
= gen_reg_rtx (DImode
);
35031 emit_insn (gen_xordi3 (tmp5
, tmp3
, tmp4
));
35033 tmp6
= gen_reg_rtx (DImode
);
35034 emit_insn (gen_lshrdi3 (tmp6
, tmp5
, GEN_INT (8)));
35035 emit_insn (gen_xordi3 (tmp
, tmp5
, tmp6
));
35038 rs6000_emit_popcount (tmp
, src
);
35039 emit_insn (gen_anddi3 (dst
, tmp
, const1_rtx
));
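/* A hypothetical sketch (not part of the build) of the shift/xor fallback
   above for the 32-bit case: after popcntb each byte holds a small count,
   and folding the word onto itself with xor twice leaves the overall parity
   in bit 0.

     static unsigned int
     parity_sketch (unsigned int per_byte_counts)
     {
       unsigned int t = per_byte_counts;
       t ^= t >> 16;        // fold upper half onto lower half
       t ^= t >> 8;         // fold the remaining two bytes together
       return t & 1;        // parity of the original word
     }
 */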
35043 /* Expand an Altivec constant permutation for little endian mode.
35044 There are two issues: First, the two input operands must be
35045 swapped so that together they form a double-wide array in LE
35046 order. Second, the vperm instruction has surprising behavior
35047 in LE mode: it interprets the elements of the source vectors
35048 in BE mode ("left to right") and interprets the elements of
35049 the destination vector in LE mode ("right to left"). To
35050 correct for this, we must subtract each element of the permute
35051 control vector from 31.
35053 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35054 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35055 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35056 serve as the permute control vector. Then, in BE mode,
35060 places the desired result in vr9. However, in LE mode the
35061 vector contents will be
35063 vr10 = 00000003 00000002 00000001 00000000
35064 vr11 = 00000007 00000006 00000005 00000004
35066 The result of the vperm using the same permute control vector is
35068 vr9 = 05000000 07000000 01000000 03000000
35070 That is, the leftmost 4 bytes of vr10 are interpreted as the
35071 source for the rightmost 4 bytes of vr9, and so on.
35073 If we change the permute control vector to
35075 vr12 = {31,20,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35083 vr9 = 00000006 00000004 00000002 00000000. */
35086 altivec_expand_vec_perm_const_le (rtx operands
[4])
35090 rtx constv
, unspec
;
35091 rtx target
= operands
[0];
35092 rtx op0
= operands
[1];
35093 rtx op1
= operands
[2];
35094 rtx sel
= operands
[3];
35096 /* Unpack and adjust the constant selector. */
35097 for (i
= 0; i
< 16; ++i
)
35099 rtx e
= XVECEXP (sel
, 0, i
);
35100 unsigned int elt
= 31 - (INTVAL (e
) & 31);
35101 perm
[i
] = GEN_INT (elt
);
35104 /* Expand to a permute, swapping the inputs and using the
35105 adjusted selector. */
35107 op0
= force_reg (V16QImode
, op0
);
35109 op1
= force_reg (V16QImode
, op1
);
35111 constv
= gen_rtx_CONST_VECTOR (V16QImode
, gen_rtvec_v (16, perm
));
35112 constv
= force_reg (V16QImode
, constv
);
35113 unspec
= gen_rtx_UNSPEC (V16QImode
, gen_rtvec (3, op1
, op0
, constv
),
35115 if (!REG_P (target
))
35117 rtx tmp
= gen_reg_rtx (V16QImode
);
35118 emit_move_insn (tmp
, unspec
);
35122 emit_move_insn (target
, unspec
);
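/* A hypothetical sketch (not part of the build) of the selector rewrite
   performed above: each element of the BE-oriented permute control vector
   is replaced by 31 minus its value (modulo 32), and the two source
   operands are swapped when the vperm is emitted.

     static void
     adjust_selector_for_le (unsigned char sel[16])
     {
       for (int i = 0; i < 16; i++)
         sel[i] = 31 - (sel[i] & 31);
     }
 */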
35125 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35126 permute control vector. But here it's not a constant, so we must
35127 generate a vector NAND or NOR to do the adjustment. */
35130 altivec_expand_vec_perm_le (rtx operands
[4])
35132 rtx notx
, iorx
, unspec
;
35133 rtx target
= operands
[0];
35134 rtx op0
= operands
[1];
35135 rtx op1
= operands
[2];
35136 rtx sel
= operands
[3];
35138 rtx norreg
= gen_reg_rtx (V16QImode
);
35139 machine_mode mode
= GET_MODE (target
);
35141 /* Get everything in regs so the pattern matches. */
35143 op0
= force_reg (mode
, op0
);
35145 op1
= force_reg (mode
, op1
);
35147 sel
= force_reg (V16QImode
, sel
);
35148 if (!REG_P (target
))
35149 tmp
= gen_reg_rtx (mode
);
35151 if (TARGET_P9_VECTOR
)
35153 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op0
, op1
, sel
),
35158 /* Invert the selector with a VNAND if available, else a VNOR.
35159 The VNAND is preferred for future fusion opportunities. */
35160 notx
= gen_rtx_NOT (V16QImode
, sel
);
35161 iorx
= (TARGET_P8_VECTOR
35162 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
35163 : gen_rtx_AND (V16QImode
, notx
, notx
));
35164 emit_insn (gen_rtx_SET (norreg
, iorx
));
35166 /* Permute with operands reversed and adjusted selector. */
35167 unspec
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, op1
, op0
, norreg
),
35171 /* Copy into target, possibly by way of a register. */
35172 if (!REG_P (target
))
35174 emit_move_insn (tmp
, unspec
);
35178 emit_move_insn (target
, unspec
);
35181 /* Expand an Altivec constant permutation. Return true if we match
35182 an efficient implementation; false to fall back to VPERM. */
35185 altivec_expand_vec_perm_const (rtx operands
[4])
35187 struct altivec_perm_insn
{
35188 HOST_WIDE_INT mask
;
35189 enum insn_code impl
;
35190 unsigned char perm
[16];
35192 static const struct altivec_perm_insn patterns
[] = {
35193 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhum_direct
,
35194 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35195 { OPTION_MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwum_direct
,
35196 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35197 { OPTION_MASK_ALTIVEC
,
35198 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghb_direct
35199 : CODE_FOR_altivec_vmrglb_direct
),
35200 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35201 { OPTION_MASK_ALTIVEC
,
35202 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghh_direct
35203 : CODE_FOR_altivec_vmrglh_direct
),
35204 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35205 { OPTION_MASK_ALTIVEC
,
35206 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrghw_direct
35207 : CODE_FOR_altivec_vmrglw_direct
),
35208 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35209 { OPTION_MASK_ALTIVEC
,
35210 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglb_direct
35211 : CODE_FOR_altivec_vmrghb_direct
),
35212 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35213 { OPTION_MASK_ALTIVEC
,
35214 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglh_direct
35215 : CODE_FOR_altivec_vmrghh_direct
),
35216 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35217 { OPTION_MASK_ALTIVEC
,
35218 (BYTES_BIG_ENDIAN
? CODE_FOR_altivec_vmrglw_direct
35219 : CODE_FOR_altivec_vmrghw_direct
),
35220 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35221 { OPTION_MASK_P8_VECTOR
,
35222 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgew_v4sf_direct
35223 : CODE_FOR_p8_vmrgow_v4sf_direct
),
35224 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35225 { OPTION_MASK_P8_VECTOR
,
35226 (BYTES_BIG_ENDIAN
? CODE_FOR_p8_vmrgow_v4sf_direct
35227 : CODE_FOR_p8_vmrgew_v4sf_direct
),
35228 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35231 unsigned int i
, j
, elt
, which
;
35232 unsigned char perm
[16];
35233 rtx target
, op0
, op1
, sel
, x
;
35236 target
= operands
[0];
35241 /* Unpack the constant selector. */
35242 for (i
= which
= 0; i
< 16; ++i
)
35244 rtx e
= XVECEXP (sel
, 0, i
);
35245 elt
= INTVAL (e
) & 31;
35246 which
|= (elt
< 16 ? 1 : 2);
35250 /* Simplify the constant selector based on operands. */
35254 gcc_unreachable ();
35258 if (!rtx_equal_p (op0
, op1
))
35263 for (i
= 0; i
< 16; ++i
)
35275 /* Look for splat patterns. */
35280 for (i
= 0; i
< 16; ++i
)
35281 if (perm
[i
] != elt
)
35285 if (!BYTES_BIG_ENDIAN
)
35287 emit_insn (gen_altivec_vspltb_direct (target
, op0
, GEN_INT (elt
)));
35293 for (i
= 0; i
< 16; i
+= 2)
35294 if (perm
[i
] != elt
|| perm
[i
+ 1] != elt
+ 1)
35298 int field
= BYTES_BIG_ENDIAN
? elt
/ 2 : 7 - elt
/ 2;
35299 x
= gen_reg_rtx (V8HImode
);
35300 emit_insn (gen_altivec_vsplth_direct (x
, gen_lowpart (V8HImode
, op0
),
35302 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35309 for (i
= 0; i
< 16; i
+= 4)
35311 || perm
[i
+ 1] != elt
+ 1
35312 || perm
[i
+ 2] != elt
+ 2
35313 || perm
[i
+ 3] != elt
+ 3)
35317 int field
= BYTES_BIG_ENDIAN
? elt
/ 4 : 3 - elt
/ 4;
35318 x
= gen_reg_rtx (V4SImode
);
35319 emit_insn (gen_altivec_vspltw_direct (x
, gen_lowpart (V4SImode
, op0
),
35321 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35327 /* Look for merge and pack patterns. */
35328 for (j
= 0; j
< ARRAY_SIZE (patterns
); ++j
)
35332 if ((patterns
[j
].mask
& rs6000_isa_flags
) == 0)
35335 elt
= patterns
[j
].perm
[0];
35336 if (perm
[0] == elt
)
35338 else if (perm
[0] == elt
+ 16)
35342 for (i
= 1; i
< 16; ++i
)
35344 elt
= patterns
[j
].perm
[i
];
35346 elt
= (elt
>= 16 ? elt
- 16 : elt
+ 16);
35347 else if (one_vec
&& elt
>= 16)
35349 if (perm
[i
] != elt
)
35354 enum insn_code icode
= patterns
[j
].impl
;
35355 machine_mode omode
= insn_data
[icode
].operand
[0].mode
;
35356 machine_mode imode
= insn_data
[icode
].operand
[1].mode
;
35358 /* For little-endian, don't use vpkuwum and vpkuhum if the
35359 underlying vector type is not V4SI and V8HI, respectively.
35360 For example, using vpkuwum with a V8HI picks up the even
35361 halfwords (BE numbering) when the even halfwords (LE
35362 numbering) are what we need. */
35363 if (!BYTES_BIG_ENDIAN
35364 && icode
== CODE_FOR_altivec_vpkuwum_direct
35365 && ((GET_CODE (op0
) == REG
35366 && GET_MODE (op0
) != V4SImode
)
35367 || (GET_CODE (op0
) == SUBREG
35368 && GET_MODE (XEXP (op0
, 0)) != V4SImode
)))
35370 if (!BYTES_BIG_ENDIAN
35371 && icode
== CODE_FOR_altivec_vpkuhum_direct
35372 && ((GET_CODE (op0
) == REG
35373 && GET_MODE (op0
) != V8HImode
)
35374 || (GET_CODE (op0
) == SUBREG
35375 && GET_MODE (XEXP (op0
, 0)) != V8HImode
)))
35378 /* For little-endian, the two input operands must be swapped
35379 (or swapped back) to ensure proper right-to-left numbering
35381 if (swapped
^ !BYTES_BIG_ENDIAN
)
35382 std::swap (op0
, op1
);
35383 if (imode
!= V16QImode
)
35385 op0
= gen_lowpart (imode
, op0
);
35386 op1
= gen_lowpart (imode
, op1
);
35388 if (omode
== V16QImode
)
35391 x
= gen_reg_rtx (omode
);
35392 emit_insn (GEN_FCN (icode
) (x
, op0
, op1
));
35393 if (omode
!= V16QImode
)
35394 emit_move_insn (target
, gen_lowpart (V16QImode
, x
));
35399 if (!BYTES_BIG_ENDIAN
)
35401 altivec_expand_vec_perm_const_le (operands
);
35408 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35409 Return true if we match an efficient implementation. */
35412 rs6000_expand_vec_perm_const_1 (rtx target
, rtx op0
, rtx op1
,
35413 unsigned char perm0
, unsigned char perm1
)
35417 /* If both selectors come from the same operand, fold to single op. */
35418 if ((perm0
& 2) == (perm1
& 2))
35425 /* If both operands are equal, fold to simpler permutation. */
35426 if (rtx_equal_p (op0
, op1
))
35429 perm1
= (perm1
& 1) + 2;
35431 /* If the first selector comes from the second operand, swap. */
35432 else if (perm0
& 2)
35438 std::swap (op0
, op1
);
35440 /* If the second selector does not come from the second operand, fail. */
35441 else if ((perm1
& 2) == 0)
35445 if (target
!= NULL
)
35447 machine_mode vmode
, dmode
;
35450 vmode
= GET_MODE (target
);
35451 gcc_assert (GET_MODE_NUNITS (vmode
) == 2);
35452 dmode
= mode_for_vector (GET_MODE_INNER (vmode
), 4);
35453 x
= gen_rtx_VEC_CONCAT (dmode
, op0
, op1
);
35454 v
= gen_rtvec (2, GEN_INT (perm0
), GEN_INT (perm1
));
35455 x
= gen_rtx_VEC_SELECT (vmode
, x
, gen_rtx_PARALLEL (VOIDmode
, v
));
35456 emit_insn (gen_rtx_SET (target
, x
));
35462 rs6000_expand_vec_perm_const (rtx operands
[4])
35464 rtx target
, op0
, op1
, sel
;
35465 unsigned char perm0
, perm1
;
35467 target
= operands
[0];
35472 /* Unpack the constant selector. */
35473 perm0
= INTVAL (XVECEXP (sel
, 0, 0)) & 3;
35474 perm1
= INTVAL (XVECEXP (sel
, 0, 1)) & 3;
35476 return rs6000_expand_vec_perm_const_1 (target
, op0
, op1
, perm0
, perm1
);
35479 /* Test whether a constant permutation is supported. */
35482 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode
,
35483 const unsigned char *sel
)
35485 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35486 if (TARGET_ALTIVEC
)
35489 /* Check for ps_merge* or evmerge* insns. */
35490 if (TARGET_PAIRED_FLOAT
&& vmode
== V2SFmode
)
35492 rtx op0
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 1);
35493 rtx op1
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 2);
35494 return rs6000_expand_vec_perm_const_1 (NULL
, op0
, op1
, sel
[0], sel
[1]);
35500 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35503 rs6000_do_expand_vec_perm (rtx target
, rtx op0
, rtx op1
,
35504 machine_mode vmode
, unsigned nelt
, rtx perm
[])
35506 machine_mode imode
;
35510 if (GET_MODE_CLASS (vmode
) != MODE_VECTOR_INT
)
35512 imode
= mode_for_size (GET_MODE_UNIT_BITSIZE (vmode
), MODE_INT
, 0);
35513 imode
= mode_for_vector (imode
, nelt
);
35516 x
= gen_rtx_CONST_VECTOR (imode
, gen_rtvec_v (nelt
, perm
));
35517 x
= expand_vec_perm (vmode
, op0
, op1
, x
, target
);
35519 emit_move_insn (target
, x
);
35522 /* Expand an extract even operation. */
35525 rs6000_expand_extract_even (rtx target
, rtx op0
, rtx op1
)
35527 machine_mode vmode
= GET_MODE (target
);
35528 unsigned i
, nelt
= GET_MODE_NUNITS (vmode
);
35531 for (i
= 0; i
< nelt
; i
++)
35532 perm
[i
] = GEN_INT (i
* 2);
35534 rs6000_do_expand_vec_perm (target
, op0
, op1
, vmode
, nelt
, perm
);
35537 /* Expand a vector interleave operation. */
35540 rs6000_expand_interleave (rtx target
, rtx op0
, rtx op1
, bool highp
)
35542 machine_mode vmode
= GET_MODE (target
);
35543 unsigned i
, high
, nelt
= GET_MODE_NUNITS (vmode
);
35546 high
= (highp
? 0 : nelt
/ 2);
35547 for (i
= 0; i
< nelt
/ 2; i
++)
35549 perm
[i
* 2] = GEN_INT (i
+ high
);
35550 perm
[i
* 2 + 1] = GEN_INT (i
+ nelt
+ high
);
35553 rs6000_do_expand_vec_perm (target
, op0
, op1
, vmode
, nelt
, perm
);
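/* A hypothetical sketch (not part of the build) of the index patterns the
   two helpers above build before handing them to expand_vec_perm: for an
   N-element vector, extract-even selects 0, 2, 4, ... from the double-wide
   concatenation of the inputs, and interleave pairs element i of the chosen
   half of the first operand with element i of the second.

     static void
     build_extract_even (unsigned nelt, unsigned sel[])
     {
       for (unsigned i = 0; i < nelt; i++)
         sel[i] = i * 2;                      // 0, 2, 4, ...
     }

     static void
     build_interleave (unsigned nelt, int highp, unsigned sel[])
     {
       unsigned base = highp ? 0 : nelt / 2;
       for (unsigned i = 0; i < nelt / 2; i++)
         {
           sel[i * 2]     = base + i;         // from the first operand
           sel[i * 2 + 1] = base + i + nelt;  // from the second operand
         }
     }
 */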
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */
void
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
{
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);
  rtx elt;
  rtx scale_vec = gen_reg_rtx (V2DFmode);
  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = const_double_from_real_value (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
}
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
static rtx
rs6000_complex_function_value (machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);

  if (TARGET_FLOAT128_TYPE
      && (mode == KCmode
	  || (mode == TCmode && TARGET_IEEEQUAD)))
    regno = ALTIVEC_ARG_RETURN;

  else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;

  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
35610 /* Return an rtx describing a return value of MODE as a PARALLEL
35611 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35612 stride REG_STRIDE. */
35615 rs6000_parallel_return (machine_mode mode
,
35616 int n_elts
, machine_mode elt_mode
,
35617 unsigned int regno
, unsigned int reg_stride
)
35619 rtx par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (n_elts
));
35622 for (i
= 0; i
< n_elts
; i
++)
35624 rtx r
= gen_rtx_REG (elt_mode
, regno
);
35625 rtx off
= GEN_INT (i
* GET_MODE_SIZE (elt_mode
));
35626 XVECEXP (par
, 0, i
) = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
35627 regno
+= reg_stride
;
35633 /* Target hook for TARGET_FUNCTION_VALUE.
35635 An integer value is in r3 and a floating-point value is in fp1,
35636 unless -msoft-float. */
35639 rs6000_function_value (const_tree valtype
,
35640 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
35641 bool outgoing ATTRIBUTE_UNUSED
)
35644 unsigned int regno
;
35645 machine_mode elt_mode
;
35648 /* Special handling for structs in darwin64. */
35650 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype
), valtype
))
35652 CUMULATIVE_ARGS valcum
;
35656 valcum
.fregno
= FP_ARG_MIN_REG
;
35657 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
35658 /* Do a trial code generation as if this were going to be passed as
35659 an argument; if any part goes in memory, we return NULL. */
35660 valret
= rs6000_darwin64_record_arg (&valcum
, valtype
, true, /* retval= */ true);
35663 /* Otherwise fall through to standard ABI rules. */
35666 mode
= TYPE_MODE (valtype
);
35668 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35669 if (rs6000_discover_homogeneous_aggregate (mode
, valtype
, &elt_mode
, &n_elts
))
35671 int first_reg
, n_regs
;
35673 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode
))
35675 /* _Decimal128 must use even/odd register pairs. */
35676 first_reg
= (elt_mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35677 n_regs
= (GET_MODE_SIZE (elt_mode
) + 7) >> 3;
35681 first_reg
= ALTIVEC_ARG_RETURN
;
35685 return rs6000_parallel_return (mode
, n_elts
, elt_mode
, first_reg
, n_regs
);
35688 /* Some return value types need be split in -mpowerpc64, 32bit ABI. */
35689 if (TARGET_32BIT
&& TARGET_POWERPC64
)
35698 int count
= GET_MODE_SIZE (mode
) / 4;
35699 return rs6000_parallel_return (mode
, count
, SImode
, GP_ARG_RETURN
, 1);
35702 if ((INTEGRAL_TYPE_P (valtype
)
35703 && GET_MODE_BITSIZE (mode
) < (TARGET_32BIT
? 32 : 64))
35704 || POINTER_TYPE_P (valtype
))
35705 mode
= TARGET_32BIT
? SImode
: DImode
;
35707 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
35708 /* _Decimal128 must use an even/odd register pair. */
35709 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35710 else if (SCALAR_FLOAT_TYPE_P (valtype
) && TARGET_HARD_FLOAT
35711 && !FLOAT128_VECTOR_P (mode
)
35712 && ((TARGET_SINGLE_FLOAT
&& (mode
== SFmode
)) || TARGET_DOUBLE_FLOAT
))
35713 regno
= FP_ARG_RETURN
;
35714 else if (TREE_CODE (valtype
) == COMPLEX_TYPE
35715 && targetm
.calls
.split_complex_arg
)
35716 return rs6000_complex_function_value (mode
);
35717 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35718 return register is used in both cases, and we won't see V2DImode/V2DFmode
35719 for pure altivec, combine the two cases. */
35720 else if ((TREE_CODE (valtype
) == VECTOR_TYPE
|| FLOAT128_VECTOR_P (mode
))
35721 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
35722 && ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
35723 regno
= ALTIVEC_ARG_RETURN
;
35725 regno
= GP_ARG_RETURN
;
35727 return gen_rtx_REG (mode
, regno
);
35730 /* Define how to find the value returned by a library function
35731 assuming the value has mode MODE. */
35733 rs6000_libcall_value (machine_mode mode
)
35735 unsigned int regno
;
35737 /* Long long return value need be split in -mpowerpc64, 32bit ABI. */
35738 if (TARGET_32BIT
&& TARGET_POWERPC64
&& mode
== DImode
)
35739 return rs6000_parallel_return (mode
, 2, SImode
, GP_ARG_RETURN
, 1);
35741 if (DECIMAL_FLOAT_MODE_P (mode
) && TARGET_HARD_FLOAT
)
35742 /* _Decimal128 must use an even/odd register pair. */
35743 regno
= (mode
== TDmode
) ? FP_ARG_RETURN
+ 1 : FP_ARG_RETURN
;
35744 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode
)
35745 && TARGET_HARD_FLOAT
35746 && ((TARGET_SINGLE_FLOAT
&& mode
== SFmode
) || TARGET_DOUBLE_FLOAT
))
35747 regno
= FP_ARG_RETURN
;
35748 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35749 return register is used in both cases, and we won't see V2DImode/V2DFmode
35750 for pure altivec, combine the two cases. */
35751 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode
)
35752 && TARGET_ALTIVEC
&& TARGET_ALTIVEC_ABI
)
35753 regno
= ALTIVEC_ARG_RETURN
;
35754 else if (COMPLEX_MODE_P (mode
) && targetm
.calls
.split_complex_arg
)
35755 return rs6000_complex_function_value (mode
);
35757 regno
= GP_ARG_RETURN
;
35759 return gen_rtx_REG (mode
, regno
);
35762 /* Compute register pressure classes. We implement the target hook to avoid
35763 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35764 lead to incorrect estimates of number of available registers and therefor
35765 increased register pressure/spill. */
35767 rs6000_compute_pressure_classes (enum reg_class
*pressure_classes
)
35772 pressure_classes
[n
++] = GENERAL_REGS
;
35774 pressure_classes
[n
++] = VSX_REGS
;
35777 if (TARGET_ALTIVEC
)
35778 pressure_classes
[n
++] = ALTIVEC_REGS
;
35779 if (TARGET_HARD_FLOAT
)
35780 pressure_classes
[n
++] = FLOAT_REGS
;
35782 pressure_classes
[n
++] = CR_REGS
;
35783 pressure_classes
[n
++] = SPECIAL_REGS
;
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : from == RS6000_PIC_OFFSET_TABLE_REGNUM
	  ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
	    || constant_pool_empty_p ()
	  : true);
}
35808 /* Define the offset between two registers, FROM to be eliminated and its
35809 replacement TO, at the start of a routine. */
35811 rs6000_initial_elimination_offset (int from
, int to
)
35813 rs6000_stack_t
*info
= rs6000_stack_info ();
35814 HOST_WIDE_INT offset
;
35816 if (from
== HARD_FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
35817 offset
= info
->push_p
? 0 : -info
->total_size
;
35818 else if (from
== FRAME_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
35820 offset
= info
->push_p
? 0 : -info
->total_size
;
35821 if (FRAME_GROWS_DOWNWARD
)
35822 offset
+= info
->fixed_size
+ info
->vars_size
+ info
->parm_size
;
35824 else if (from
== FRAME_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
35825 offset
= FRAME_GROWS_DOWNWARD
35826 ? info
->fixed_size
+ info
->vars_size
+ info
->parm_size
35828 else if (from
== ARG_POINTER_REGNUM
&& to
== HARD_FRAME_POINTER_REGNUM
)
35829 offset
= info
->total_size
;
35830 else if (from
== ARG_POINTER_REGNUM
&& to
== STACK_POINTER_REGNUM
)
35831 offset
= info
->push_p
? info
->total_size
: 0;
35832 else if (from
== RS6000_PIC_OFFSET_TABLE_REGNUM
)
35835 gcc_unreachable ();
35840 /* Fill in sizes of registers used by unwinder. */
35843 rs6000_init_dwarf_reg_sizes_extra (tree address
)
35845 if (TARGET_MACHO
&& ! TARGET_ALTIVEC
)
35848 machine_mode mode
= TYPE_MODE (char_type_node
);
35849 rtx addr
= expand_expr (address
, NULL_RTX
, VOIDmode
, EXPAND_NORMAL
);
35850 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
35851 rtx value
= gen_int_mode (16, mode
);
35853 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35854 The unwinder still needs to know the size of Altivec registers. */
35856 for (i
= FIRST_ALTIVEC_REGNO
; i
< LAST_ALTIVEC_REGNO
+1; i
++)
35858 int column
= DWARF_REG_TO_UNWIND_COLUMN
35859 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i
), true));
35860 HOST_WIDE_INT offset
= column
* GET_MODE_SIZE (mode
);
35862 emit_move_insn (adjust_address (mem
, mode
, offset
), value
);
35867 /* Map internal gcc register numbers to debug format register numbers.
35868 FORMAT specifies the type of debug register number to use:
35869 0 -- debug information, except for frame-related sections
35870 1 -- DWARF .debug_frame section
35871 2 -- DWARF .eh_frame section */
35874 rs6000_dbx_register_number (unsigned int regno
, unsigned int format
)
35876 /* Except for the above, we use the internal number for non-DWARF
35877 debug information, and also for .eh_frame. */
35878 if ((format
== 0 && write_symbols
!= DWARF2_DEBUG
) || format
== 2)
35881 /* On some platforms, we use the standard DWARF register
35882 numbering for .debug_info and .debug_frame. */
35883 #ifdef RS6000_USE_DWARF_NUMBERING
35886 if (regno
== LR_REGNO
)
35888 if (regno
== CTR_REGNO
)
35890 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35891 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35892 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35893 to the DWARF reg for CR. */
35894 if (format
== 1 && regno
== CR2_REGNO
)
35896 if (CR_REGNO_P (regno
))
35897 return regno
- CR0_REGNO
+ 86;
35898 if (regno
== CA_REGNO
)
35899 return 101; /* XER */
35900 if (ALTIVEC_REGNO_P (regno
))
35901 return regno
- FIRST_ALTIVEC_REGNO
+ 1124;
35902 if (regno
== VRSAVE_REGNO
)
35904 if (regno
== VSCR_REGNO
)
/* target hook eh_return_filter_mode */
static machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (machine_mode mode)
{
  /* -m32 does not support TImode.  This is the default, from
     default_scalar_mode_supported_p.  For -m32 -mpowerpc64 we want the
     same ABI as for -m32.  But default_scalar_mode_supported_p allows
     integer modes of precision 2 * BITS_PER_WORD, which matches TImode
     for -mpowerpc64.  */
  if (TARGET_32BIT && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}
35937 /* Target hook for vector_mode_supported_p. */
35939 rs6000_vector_mode_supported_p (machine_mode mode
)
35942 if (TARGET_PAIRED_FLOAT
&& PAIRED_VECTOR_MODE (mode
))
35945 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35946 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35948 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode
) && !FLOAT128_IEEE_P (mode
))
35955 /* Target hook for floatn_mode. */
35956 static machine_mode
35957 rs6000_floatn_mode (int n
, bool extended
)
35967 if (TARGET_FLOAT128_KEYWORD
)
35968 return (FLOAT128_IEEE_P (TFmode
)) ? TFmode
: KFmode
;
35976 /* Those are the only valid _FloatNx types. */
35977 gcc_unreachable ();
35991 if (TARGET_FLOAT128_KEYWORD
)
35992 return (FLOAT128_IEEE_P (TFmode
)) ? TFmode
: KFmode
;
36003 /* Target hook for c_mode_for_suffix. */
36004 static machine_mode
36005 rs6000_c_mode_for_suffix (char suffix
)
36007 if (TARGET_FLOAT128_TYPE
)
36009 if (suffix
== 'q' || suffix
== 'Q')
36010 return (FLOAT128_IEEE_P (TFmode
)) ? TFmode
: KFmode
;
36012 /* At the moment, we are not defining a suffix for IBM extended double.
36013 If/when the default for -mabi=ieeelongdouble is changed, and we want
36014 to support __ibm128 constants in legacy library code, we may need to
36015 re-evalaute this decision. Currently, c-lex.c only supports 'w' and
36016 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36017 __float80 constants. */
/* Target hook for invalid_arg_for_unprototyped_fn. */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
	  && typelist == 0
	  && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
	  && (funcdecl == NULL_TREE
	      || (TREE_CODE (funcdecl) == FUNCTION_DECL
		  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	 ? N_("AltiVec argument passed to unprototyped function")
	 : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
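/* A hypothetical sketch (not part of the build) of how the offset returned
   above is consumed: with the usual shadow scale of 3, AddressSanitizer maps
   each 8 application bytes onto one shadow byte at (addr >> 3) + offset.

     static unsigned long long
     asan_shadow_address_sketch (unsigned long long app_addr, int is_64bit)
     {
       unsigned long long offset = 1ULL << (is_64bit ? 41 : 29);
       return (app_addr >> 3) + offset;   // assumes shadow scale == 3
     }
 */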
36060 /* Mask options that we want to support inside of attribute((target)) and
36061 #pragma GCC target operations. Note, we do not include things like
36062 64/32-bit, endianness, hard/soft floating point, etc. that would have
36063 different calling sequences. */
36065 struct rs6000_opt_mask
{
36066 const char *name
; /* option name */
36067 HOST_WIDE_INT mask
; /* mask to set */
36068 bool invert
; /* invert sense of mask */
36069 bool valid_target
; /* option is a target option */
36072 static struct rs6000_opt_mask
const rs6000_opt_masks
[] =
36074 { "altivec", OPTION_MASK_ALTIVEC
, false, true },
36075 { "cmpb", OPTION_MASK_CMPB
, false, true },
36076 { "crypto", OPTION_MASK_CRYPTO
, false, true },
36077 { "direct-move", OPTION_MASK_DIRECT_MOVE
, false, true },
36078 { "dlmzb", OPTION_MASK_DLMZB
, false, true },
36079 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX
,
36081 { "float128", OPTION_MASK_FLOAT128_KEYWORD
, false, false },
36082 { "float128-type", OPTION_MASK_FLOAT128_TYPE
, false, false },
36083 { "float128-hardware", OPTION_MASK_FLOAT128_HW
, false, false },
36084 { "fprnd", OPTION_MASK_FPRND
, false, true },
36085 { "hard-dfp", OPTION_MASK_DFP
, false, true },
36086 { "htm", OPTION_MASK_HTM
, false, true },
36087 { "isel", OPTION_MASK_ISEL
, false, true },
36088 { "mfcrf", OPTION_MASK_MFCRF
, false, true },
36089 { "mfpgpr", OPTION_MASK_MFPGPR
, false, true },
36090 { "modulo", OPTION_MASK_MODULO
, false, true },
36091 { "mulhw", OPTION_MASK_MULHW
, false, true },
36092 { "multiple", OPTION_MASK_MULTIPLE
, false, true },
36093 { "popcntb", OPTION_MASK_POPCNTB
, false, true },
36094 { "popcntd", OPTION_MASK_POPCNTD
, false, true },
36095 { "power8-fusion", OPTION_MASK_P8_FUSION
, false, true },
36096 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN
, false, true },
36097 { "power8-vector", OPTION_MASK_P8_VECTOR
, false, true },
36098 { "power9-fusion", OPTION_MASK_P9_FUSION
, false, true },
36099 { "power9-minmax", OPTION_MASK_P9_MINMAX
, false, true },
36100 { "power9-misc", OPTION_MASK_P9_MISC
, false, true },
36101 { "power9-vector", OPTION_MASK_P9_VECTOR
, false, true },
36102 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT
, false, true },
36103 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT
, false, true },
36104 { "quad-memory", OPTION_MASK_QUAD_MEMORY
, false, true },
36105 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC
, false, true },
36106 { "recip-precision", OPTION_MASK_RECIP_PRECISION
, false, true },
36107 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT
, false, true },
36108 { "string", OPTION_MASK_STRING
, false, true },
36109 { "toc-fusion", OPTION_MASK_TOC_FUSION
, false, true },
36110 { "update", OPTION_MASK_NO_UPDATE
, true , true },
36111 { "vsx", OPTION_MASK_VSX
, false, true },
36112 #ifdef OPTION_MASK_64BIT
36114 { "aix64", OPTION_MASK_64BIT
, false, false },
36115 { "aix32", OPTION_MASK_64BIT
, true, false },
36117 { "64", OPTION_MASK_64BIT
, false, false },
36118 { "32", OPTION_MASK_64BIT
, true, false },
36121 #ifdef OPTION_MASK_EABI
36122 { "eabi", OPTION_MASK_EABI
, false, false },
36124 #ifdef OPTION_MASK_LITTLE_ENDIAN
36125 { "little", OPTION_MASK_LITTLE_ENDIAN
, false, false },
36126 { "big", OPTION_MASK_LITTLE_ENDIAN
, true, false },
36128 #ifdef OPTION_MASK_RELOCATABLE
36129 { "relocatable", OPTION_MASK_RELOCATABLE
, false, false },
36131 #ifdef OPTION_MASK_STRICT_ALIGN
36132 { "strict-align", OPTION_MASK_STRICT_ALIGN
, false, false },
36134 { "soft-float", OPTION_MASK_SOFT_FLOAT
, false, false },
36135 { "string", OPTION_MASK_STRING
, false, false },
36138 /* Builtin mask mapping for printing the flags. */
36139 static struct rs6000_opt_mask
const rs6000_builtin_mask_names
[] =
36141 { "altivec", RS6000_BTM_ALTIVEC
, false, false },
36142 { "vsx", RS6000_BTM_VSX
, false, false },
36143 { "paired", RS6000_BTM_PAIRED
, false, false },
36144 { "fre", RS6000_BTM_FRE
, false, false },
36145 { "fres", RS6000_BTM_FRES
, false, false },
36146 { "frsqrte", RS6000_BTM_FRSQRTE
, false, false },
36147 { "frsqrtes", RS6000_BTM_FRSQRTES
, false, false },
36148 { "popcntd", RS6000_BTM_POPCNTD
, false, false },
36149 { "cell", RS6000_BTM_CELL
, false, false },
36150 { "power8-vector", RS6000_BTM_P8_VECTOR
, false, false },
36151 { "power9-vector", RS6000_BTM_P9_VECTOR
, false, false },
36152 { "power9-misc", RS6000_BTM_P9_MISC
, false, false },
36153 { "crypto", RS6000_BTM_CRYPTO
, false, false },
36154 { "htm", RS6000_BTM_HTM
, false, false },
36155 { "hard-dfp", RS6000_BTM_DFP
, false, false },
36156 { "hard-float", RS6000_BTM_HARD_FLOAT
, false, false },
36157 { "long-double-128", RS6000_BTM_LDBL128
, false, false },
36158 { "float128", RS6000_BTM_FLOAT128
, false, false },
36161 /* Option variables that we want to support inside attribute((target)) and
36162 #pragma GCC target operations. */
36164 struct rs6000_opt_var
{
36165 const char *name
; /* option name */
36166 size_t global_offset
; /* offset of the option in global_options. */
36167 size_t target_offset
; /* offset of the option in target options. */
36170 static struct rs6000_opt_var
const rs6000_opt_vars
[] =
36173 offsetof (struct gcc_options
, x_TARGET_FRIZ
),
36174 offsetof (struct cl_target_option
, x_TARGET_FRIZ
), },
36175 { "avoid-indexed-addresses",
36176 offsetof (struct gcc_options
, x_TARGET_AVOID_XFORM
),
36177 offsetof (struct cl_target_option
, x_TARGET_AVOID_XFORM
) },
36179 offsetof (struct gcc_options
, x_rs6000_paired_float
),
36180 offsetof (struct cl_target_option
, x_rs6000_paired_float
), },
36182 offsetof (struct gcc_options
, x_rs6000_default_long_calls
),
36183 offsetof (struct cl_target_option
, x_rs6000_default_long_calls
), },
36184 { "optimize-swaps",
36185 offsetof (struct gcc_options
, x_rs6000_optimize_swaps
),
36186 offsetof (struct cl_target_option
, x_rs6000_optimize_swaps
), },
36187 { "allow-movmisalign",
36188 offsetof (struct gcc_options
, x_TARGET_ALLOW_MOVMISALIGN
),
36189 offsetof (struct cl_target_option
, x_TARGET_ALLOW_MOVMISALIGN
), },
36191 offsetof (struct gcc_options
, x_TARGET_SCHED_GROUPS
),
36192 offsetof (struct cl_target_option
, x_TARGET_SCHED_GROUPS
), },
36194 offsetof (struct gcc_options
, x_TARGET_ALWAYS_HINT
),
36195 offsetof (struct cl_target_option
, x_TARGET_ALWAYS_HINT
), },
36196 { "align-branch-targets",
36197 offsetof (struct gcc_options
, x_TARGET_ALIGN_BRANCH_TARGETS
),
36198 offsetof (struct cl_target_option
, x_TARGET_ALIGN_BRANCH_TARGETS
), },
36200 offsetof (struct gcc_options
, x_tls_markers
),
36201 offsetof (struct cl_target_option
, x_tls_markers
), },
36203 offsetof (struct gcc_options
, x_TARGET_SCHED_PROLOG
),
36204 offsetof (struct cl_target_option
, x_TARGET_SCHED_PROLOG
), },
36206 offsetof (struct gcc_options
, x_TARGET_SCHED_PROLOG
),
36207 offsetof (struct cl_target_option
, x_TARGET_SCHED_PROLOG
), },
36210 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36211 parsing. Return true if there were no errors. */
36214 rs6000_inner_target_options (tree args
, bool attr_p
)
36218 if (args
== NULL_TREE
)
36221 else if (TREE_CODE (args
) == STRING_CST
)
36223 char *p
= ASTRDUP (TREE_STRING_POINTER (args
));
36226 while ((q
= strtok (p
, ",")) != NULL
)
36228 bool error_p
= false;
36229 bool not_valid_p
= false;
36230 const char *cpu_opt
= NULL
;
36233 if (strncmp (q
, "cpu=", 4) == 0)
36235 int cpu_index
= rs6000_cpu_name_lookup (q
+4);
36236 if (cpu_index
>= 0)
36237 rs6000_cpu_index
= cpu_index
;
36244 else if (strncmp (q
, "tune=", 5) == 0)
36246 int tune_index
= rs6000_cpu_name_lookup (q
+5);
36247 if (tune_index
>= 0)
36248 rs6000_tune_index
= tune_index
;
36258 bool invert
= false;
36262 if (strncmp (r
, "no-", 3) == 0)
36268 for (i
= 0; i
< ARRAY_SIZE (rs6000_opt_masks
); i
++)
36269 if (strcmp (r
, rs6000_opt_masks
[i
].name
) == 0)
36271 HOST_WIDE_INT mask
= rs6000_opt_masks
[i
].mask
;
36273 if (!rs6000_opt_masks
[i
].valid_target
)
36274 not_valid_p
= true;
36278 rs6000_isa_flags_explicit
|= mask
;
36280 /* VSX needs altivec, so -mvsx automagically sets
36281 altivec and disables -mavoid-indexed-addresses. */
36284 if (mask
== OPTION_MASK_VSX
)
36286 mask
|= OPTION_MASK_ALTIVEC
;
36287 TARGET_AVOID_XFORM
= 0;
36291 if (rs6000_opt_masks
[i
].invert
)
36295 rs6000_isa_flags
&= ~mask
;
36297 rs6000_isa_flags
|= mask
;
36302 if (error_p
&& !not_valid_p
)
36304 for (i
= 0; i
< ARRAY_SIZE (rs6000_opt_vars
); i
++)
36305 if (strcmp (r
, rs6000_opt_vars
[i
].name
) == 0)
36307 size_t j
= rs6000_opt_vars
[i
].global_offset
;
36308 *((int *) ((char *)&global_options
+ j
)) = !invert
;
36310 not_valid_p
= false;
36318 const char *eprefix
, *esuffix
;
36323 eprefix
= "__attribute__((__target__(";
36328 eprefix
= "#pragma GCC target ";
36333 error ("invalid cpu %qs for %s%qs%s", cpu_opt
, eprefix
,
36335 else if (not_valid_p
)
36336 error ("%s%qs%s is not allowed", eprefix
, q
, esuffix
);
36338 error ("%s%qs%s is invalid", eprefix
, q
, esuffix
);
36343 else if (TREE_CODE (args
) == TREE_LIST
)
36347 tree value
= TREE_VALUE (args
);
36350 bool ret2
= rs6000_inner_target_options (value
, attr_p
);
36354 args
= TREE_CHAIN (args
);
36356 while (args
!= NULL_TREE
);
36361 error ("attribute %<target%> argument not a string");
36368 /* Print out the target options as a list for -mdebug=target. */
36371 rs6000_debug_target_options (tree args
, const char *prefix
)
36373 if (args
== NULL_TREE
)
36374 fprintf (stderr
, "%s<NULL>", prefix
);
36376 else if (TREE_CODE (args
) == STRING_CST
)
36378 char *p
= ASTRDUP (TREE_STRING_POINTER (args
));
36381 while ((q
= strtok (p
, ",")) != NULL
)
36384 fprintf (stderr
, "%s\"%s\"", prefix
, q
);
36389 else if (TREE_CODE (args
) == TREE_LIST
)
36393 tree value
= TREE_VALUE (args
);
36396 rs6000_debug_target_options (value
, prefix
);
36399 args
= TREE_CHAIN (args
);
36401 while (args
!= NULL_TREE
);
36405 gcc_unreachable ();
36411 /* Hook to validate attribute((target("..."))). */
36414 rs6000_valid_attribute_p (tree fndecl
,
36415 tree
ARG_UNUSED (name
),
36419 struct cl_target_option cur_target
;
36421 tree old_optimize
= build_optimization_node (&global_options
);
36422 tree new_target
, new_optimize
;
36423 tree func_optimize
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
);
36425 gcc_assert ((fndecl
!= NULL_TREE
) && (args
!= NULL_TREE
));
36427 if (TARGET_DEBUG_TARGET
)
36429 tree tname
= DECL_NAME (fndecl
);
36430 fprintf (stderr
, "\n==================== rs6000_valid_attribute_p:\n");
36432 fprintf (stderr
, "function: %.*s\n",
36433 (int) IDENTIFIER_LENGTH (tname
),
36434 IDENTIFIER_POINTER (tname
));
36436 fprintf (stderr
, "function: unknown\n");
36438 fprintf (stderr
, "args:");
36439 rs6000_debug_target_options (args
, " ");
36440 fprintf (stderr
, "\n");
36443 fprintf (stderr
, "flags: 0x%x\n", flags
);
36445 fprintf (stderr
, "--------------------\n");
36448 /* attribute((target("default"))) does nothing, beyond
36449 affecting multi-versioning. */
36450 if (TREE_VALUE (args
)
36451 && TREE_CODE (TREE_VALUE (args
)) == STRING_CST
36452 && TREE_CHAIN (args
) == NULL_TREE
36453 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args
)), "default") == 0)
36456 old_optimize
= build_optimization_node (&global_options
);
36457 func_optimize
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
);
36459 /* If the function changed the optimization levels as well as setting target
36460 options, start with the optimizations specified. */
36461 if (func_optimize
&& func_optimize
!= old_optimize
)
36462 cl_optimization_restore (&global_options
,
36463 TREE_OPTIMIZATION (func_optimize
));
36465 /* The target attributes may also change some optimization flags, so update
36466 the optimization options if necessary. */
36467 cl_target_option_save (&cur_target
, &global_options
);
36468 rs6000_cpu_index
= rs6000_tune_index
= -1;
36469 ret
= rs6000_inner_target_options (args
, true);
36471 /* Set up any additional state. */
36474 ret
= rs6000_option_override_internal (false);
36475 new_target
= build_target_option_node (&global_options
);
36480 new_optimize
= build_optimization_node (&global_options
);
36487 DECL_FUNCTION_SPECIFIC_TARGET (fndecl
) = new_target
;
36489 if (old_optimize
!= new_optimize
)
36490 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
) = new_optimize
;
36493 cl_target_option_restore (&global_options
, &cur_target
);
36495 if (old_optimize
!= new_optimize
)
36496 cl_optimization_restore (&global_options
,
36497 TREE_OPTIMIZATION (old_optimize
));
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */
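/* Illustration (hypothetical user code, not part of GCC itself): a typical
   sequence that exercises this hook is

       #pragma GCC push_options
       #pragma GCC target ("cpu=power9,vsx")
       ... code compiled with the power9/VSX ISA flags ...
       #pragma GCC pop_options

   The pop is what arrives here with ARGS == NULL and a non-NULL POP_TARGET,
   so the previously saved options can be restored.  */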
36508 rs6000_pragma_target_parse (tree args
, tree pop_target
)
36510 tree prev_tree
= build_target_option_node (&global_options
);
36512 struct cl_target_option
*prev_opt
, *cur_opt
;
36513 HOST_WIDE_INT prev_flags
, cur_flags
, diff_flags
;
36514 HOST_WIDE_INT prev_bumask
, cur_bumask
, diff_bumask
;
36516 if (TARGET_DEBUG_TARGET
)
36518 fprintf (stderr
, "\n==================== rs6000_pragma_target_parse\n");
36519 fprintf (stderr
, "args:");
36520 rs6000_debug_target_options (args
, " ");
36521 fprintf (stderr
, "\n");
36525 fprintf (stderr
, "pop_target:\n");
36526 debug_tree (pop_target
);
36529 fprintf (stderr
, "pop_target: <NULL>\n");
36531 fprintf (stderr
, "--------------------\n");
36536 cur_tree
= ((pop_target
)
36538 : target_option_default_node
);
36539 cl_target_option_restore (&global_options
,
36540 TREE_TARGET_OPTION (cur_tree
));
36544 rs6000_cpu_index
= rs6000_tune_index
= -1;
36545 if (!rs6000_inner_target_options (args
, false)
36546 || !rs6000_option_override_internal (false)
36547 || (cur_tree
= build_target_option_node (&global_options
))
36550 if (TARGET_DEBUG_BUILTIN
|| TARGET_DEBUG_TARGET
)
36551 fprintf (stderr
, "invalid pragma\n");
36557 target_option_current_node
= cur_tree
;
36559 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36560 change the macros that are defined. */
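  /* For instance (illustrative; the actual macro handling lives in the
     rs6000 C front-end support code): enabling VSX inside a pragma defines
     __VSX__ and disabling it removes the definition.  Only the bits present
     in diff_flags/diff_bumask are touched, so macros for unchanged options
     are left alone.  */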
36561 if (rs6000_target_modify_macros_ptr
)
36563 prev_opt
= TREE_TARGET_OPTION (prev_tree
);
36564 prev_bumask
= prev_opt
->x_rs6000_builtin_mask
;
36565 prev_flags
= prev_opt
->x_rs6000_isa_flags
;
36567 cur_opt
= TREE_TARGET_OPTION (cur_tree
);
36568 cur_flags
= cur_opt
->x_rs6000_isa_flags
;
36569 cur_bumask
= cur_opt
->x_rs6000_builtin_mask
;
36571 diff_bumask
= (prev_bumask
^ cur_bumask
);
36572 diff_flags
= (prev_flags
^ cur_flags
);
36574 if ((diff_flags
!= 0) || (diff_bumask
!= 0))
36576 /* Delete old macros. */
36577 rs6000_target_modify_macros_ptr (false,
36578 prev_flags
& diff_flags
,
36579 prev_bumask
& diff_bumask
);
36581 /* Define new macros. */
36582 rs6000_target_modify_macros_ptr (true,
36583 cur_flags
& diff_flags
,
36584 cur_bumask
& diff_bumask
);
36592 /* Remember the last target of rs6000_set_current_function. */
36593 static GTY(()) tree rs6000_previous_fndecl
;
36595 /* Restore target's globals from NEW_TREE and invalidate the
36596 rs6000_previous_fndecl cache. */
36599 rs6000_activate_target_options (tree new_tree
)
36601 cl_target_option_restore (&global_options
, TREE_TARGET_OPTION (new_tree
));
36602 if (TREE_TARGET_GLOBALS (new_tree
))
36603 restore_target_globals (TREE_TARGET_GLOBALS (new_tree
));
36604 else if (new_tree
== target_option_default_node
)
36605 restore_target_globals (&default_target_globals
);
36607 TREE_TARGET_GLOBALS (new_tree
) = save_target_globals_default_opts ();
36608 rs6000_previous_fndecl
= NULL_TREE
;
36611 /* Establish appropriate back-end context for processing the function
36612 FNDECL. The argument might be NULL to indicate processing at top
36613 level, outside of any function scope. */
36615 rs6000_set_current_function (tree fndecl
)
36617 if (TARGET_DEBUG_TARGET
)
36619 fprintf (stderr
, "\n==================== rs6000_set_current_function");
36622 fprintf (stderr
, ", fndecl %s (%p)",
36623 (DECL_NAME (fndecl
)
36624 ? IDENTIFIER_POINTER (DECL_NAME (fndecl
))
36625 : "<unknown>"), (void *)fndecl
);
36627 if (rs6000_previous_fndecl
)
36628 fprintf (stderr
, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl
);
36630 fprintf (stderr
, "\n");
36633 /* Only change the context if the function changes. This hook is called
36634 several times in the course of compiling a function, and we don't want to
36635 slow things down too much or call target_reinit when it isn't safe. */
36636 if (fndecl
== rs6000_previous_fndecl
)
36640 if (rs6000_previous_fndecl
== NULL_TREE
)
36641 old_tree
= target_option_current_node
;
36642 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
))
36643 old_tree
= DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
);
36645 old_tree
= target_option_default_node
;
36648 if (fndecl
== NULL_TREE
)
36650 if (old_tree
!= target_option_current_node
)
36651 new_tree
= target_option_current_node
;
36653 new_tree
= NULL_TREE
;
36657 new_tree
= DECL_FUNCTION_SPECIFIC_TARGET (fndecl
);
36658 if (new_tree
== NULL_TREE
)
36659 new_tree
= target_option_default_node
;
36662 if (TARGET_DEBUG_TARGET
)
36666 fprintf (stderr
, "\nnew fndecl target specific options:\n");
36667 debug_tree (new_tree
);
36672 fprintf (stderr
, "\nold fndecl target specific options:\n");
36673 debug_tree (old_tree
);
36676 if (old_tree
!= NULL_TREE
|| new_tree
!= NULL_TREE
)
36677 fprintf (stderr
, "--------------------\n");
36680 if (new_tree
&& old_tree
!= new_tree
)
36681 rs6000_activate_target_options (new_tree
);
36684 rs6000_previous_fndecl
= fndecl
;
/* Save the current options.  */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
			       struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}
/* Restore the current options.  */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
				  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}
/* Print the current options.  */

static void
rs6000_function_specific_print (FILE *file, int indent,
				struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
			    ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
			    ptr->x_rs6000_isa_flags_explicit);
}
36723 /* Helper function to print the current isa or misc options on a line. */
36726 rs6000_print_options_internal (FILE *file
,
36728 const char *string
,
36729 HOST_WIDE_INT flags
,
36730 const char *prefix
,
36731 const struct rs6000_opt_mask
*opts
,
36732 size_t num_elements
)
36735 size_t start_column
= 0;
36737 size_t max_column
= 120;
36738 size_t prefix_len
= strlen (prefix
);
36739 size_t comma_len
= 0;
36740 const char *comma
= "";
36743 start_column
+= fprintf (file
, "%*s", indent
, "");
36747 fprintf (stderr
, DEBUG_FMT_S
, string
, "<none>");
36751 start_column
+= fprintf (stderr
, DEBUG_FMT_WX
, string
, flags
);
36753 /* Print the various mask options. */
36754 cur_column
= start_column
;
36755 for (i
= 0; i
< num_elements
; i
++)
36757 bool invert
= opts
[i
].invert
;
36758 const char *name
= opts
[i
].name
;
36759 const char *no_str
= "";
36760 HOST_WIDE_INT mask
= opts
[i
].mask
;
36761 size_t len
= comma_len
+ prefix_len
+ strlen (name
);
36765 if ((flags
& mask
) == 0)
36768 len
+= sizeof ("no-") - 1;
36776 if ((flags
& mask
) != 0)
36779 len
+= sizeof ("no-") - 1;
36786 if (cur_column
> max_column
)
36788 fprintf (stderr
, ", \\\n%*s", (int)start_column
, "");
36789 cur_column
= start_column
+ len
;
36793 fprintf (file
, "%s%s%s%s", comma
, prefix
, no_str
, name
);
36795 comma_len
= sizeof (", ") - 1;
36798 fputs ("\n", file
);
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
			  HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
				 &rs6000_opt_masks[0],
				 ARRAY_SIZE (rs6000_opt_masks));
}

/* Helper function to print the current builtin options on a line.  */

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
			      HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
				 &rs6000_builtin_mask_names[0],
				 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
   2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
   -mupper-regs-df, etc.).

   If the user used -mno-power8-vector, we need to turn off all of the implicit
   ISA 2.07 and 3.0 options that relate to the vector unit.

   If the user used -mno-power9-vector, we need to turn off all of the implicit
   ISA 3.0 options that relate to the vector unit.

   This function does not handle explicit options such as the user specifying
   -mdirect-move.  These are handled in rs6000_option_override_internal, and
   the appropriate error is given if needed.

   We return a mask of all of the implicit options that should not be enabled
   by default.  */
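/* Example (illustrative only): with "-mcpu=power9 -mno-power8-vector" the loop
   below notices that power8-vector was explicitly disabled, clears the
   dependent OTHER_P8_VECTOR_MASKS bits (which cover the power9 vector
   support), and for any of those bits the user also enabled explicitly it
   reports an error of the form "-mno-power8-vector turns off -mpower9-vector".  */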
static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void)
{
  HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
  size_t i, j;

  static const struct {
    const HOST_WIDE_INT no_flag;	/* flag explicitly turned off.  */
    const HOST_WIDE_INT dep_flags;	/* flags that depend on this option.  */
    const char *const name;		/* name of the switch.  */
  } flags[] = {
    { OPTION_MASK_P9_VECTOR,	OTHER_P9_VECTOR_MASKS,	"power9-vector" },
    { OPTION_MASK_P8_VECTOR,	OTHER_P8_VECTOR_MASKS,	"power8-vector" },
    { OPTION_MASK_VSX,		OTHER_VSX_VECTOR_MASKS,	"vsx" },
  };
36854 for (i
= 0; i
< ARRAY_SIZE (flags
); i
++)
36856 HOST_WIDE_INT no_flag
= flags
[i
].no_flag
;
36858 if ((rs6000_isa_flags
& no_flag
) == 0
36859 && (rs6000_isa_flags_explicit
& no_flag
) != 0)
36861 HOST_WIDE_INT dep_flags
= flags
[i
].dep_flags
;
36862 HOST_WIDE_INT set_flags
= (rs6000_isa_flags_explicit
36868 for (j
= 0; j
< ARRAY_SIZE (rs6000_opt_masks
); j
++)
36869 if ((set_flags
& rs6000_opt_masks
[j
].mask
) != 0)
36871 set_flags
&= ~rs6000_opt_masks
[j
].mask
;
36872 error ("%<-mno-%s%> turns off %<-m%s%>",
36874 rs6000_opt_masks
[j
].name
);
36877 gcc_assert (!set_flags
);
36880 rs6000_isa_flags
&= ~dep_flags
;
36881 ignore_masks
|= no_flag
| dep_flags
;
36885 return ignore_masks
;
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
/* Return the clone id of the target we are compiling code for in a target
   clone.  The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
   the priority list for the target clones (ordered from lowest to
   highest).  */
36912 rs6000_clone_priority (tree fndecl
)
36914 tree fn_opts
= DECL_FUNCTION_SPECIFIC_TARGET (fndecl
);
36915 HOST_WIDE_INT isa_masks
;
36916 int ret
= CLONE_DEFAULT
;
36917 tree attrs
= lookup_attribute ("target", DECL_ATTRIBUTES (fndecl
));
36918 const char *attrs_str
= NULL
;
36920 attrs
= TREE_VALUE (TREE_VALUE (attrs
));
36921 attrs_str
= TREE_STRING_POINTER (attrs
);
36923 /* Return priority zero for default function. Return the ISA needed for the
36924 function if it is not the default. */
36925 if (strcmp (attrs_str
, "default") != 0)
36927 if (fn_opts
== NULL_TREE
)
36928 fn_opts
= target_option_default_node
;
36930 if (!fn_opts
|| !TREE_TARGET_OPTION (fn_opts
))
36931 isa_masks
= rs6000_isa_flags
;
36933 isa_masks
= TREE_TARGET_OPTION (fn_opts
)->x_rs6000_isa_flags
;
36935 for (ret
= CLONE_MAX
- 1; ret
!= 0; ret
--)
36936 if ((rs6000_clone_map
[ret
].isa_mask
& isa_masks
) != 0)
36940 if (TARGET_DEBUG_TARGET
)
36941 fprintf (stderr
, "rs6000_get_function_version_priority (%s) => %d\n",
36942 get_decl_name (fndecl
), ret
);
/* This compares the priority of target features in function DECL1 and DECL2.
   It returns a positive value if DECL1 is higher priority, a negative value if
   DECL2 is higher priority and 0 if they are the same.  Note, priorities are
   ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0).  */

static int
rs6000_compare_version_priority (tree decl1, tree decl2)
{
  int priority1 = rs6000_clone_priority (decl1);
  int priority2 = rs6000_clone_priority (decl2);
  int ret = priority1 - priority2;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
	     get_decl_name (decl1), get_decl_name (decl2), ret);

  return ret;
}
/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */
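/* Illustration (hypothetical user code): the multi-versioning machinery below
   is driven by declarations such as

       __attribute__((target_clones("cpu=power9,default")))
       long mult (long a, long b) { return a * b; }

   Each clone gets its own body; the dispatcher created here is an ifunc whose
   resolver (built later by make_resolver_func) selects the best clone at load
   time via __builtin_cpu_supports.  */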
36971 rs6000_get_function_versions_dispatcher (void *decl
)
36973 tree fn
= (tree
) decl
;
36974 struct cgraph_node
*node
= NULL
;
36975 struct cgraph_node
*default_node
= NULL
;
36976 struct cgraph_function_version_info
*node_v
= NULL
;
36977 struct cgraph_function_version_info
*first_v
= NULL
;
36979 tree dispatch_decl
= NULL
;
36981 struct cgraph_function_version_info
*default_version_info
= NULL
;
36982 gcc_assert (fn
!= NULL
&& DECL_FUNCTION_VERSIONED (fn
));
36984 if (TARGET_DEBUG_TARGET
)
36985 fprintf (stderr
, "rs6000_get_function_versions_dispatcher (%s)\n",
36986 get_decl_name (fn
));
36988 node
= cgraph_node::get (fn
);
36989 gcc_assert (node
!= NULL
);
36991 node_v
= node
->function_version ();
36992 gcc_assert (node_v
!= NULL
);
36994 if (node_v
->dispatcher_resolver
!= NULL
)
36995 return node_v
->dispatcher_resolver
;
36997 /* Find the default version and make it the first node. */
36999 /* Go to the beginning of the chain. */
37000 while (first_v
->prev
!= NULL
)
37001 first_v
= first_v
->prev
;
37003 default_version_info
= first_v
;
37004 while (default_version_info
!= NULL
)
37006 const tree decl2
= default_version_info
->this_node
->decl
;
37007 if (is_function_default_version (decl2
))
37009 default_version_info
= default_version_info
->next
;
37012 /* If there is no default node, just return NULL. */
37013 if (default_version_info
== NULL
)
37016 /* Make default info the first node. */
37017 if (first_v
!= default_version_info
)
37019 default_version_info
->prev
->next
= default_version_info
->next
;
37020 if (default_version_info
->next
)
37021 default_version_info
->next
->prev
= default_version_info
->prev
;
37022 first_v
->prev
= default_version_info
;
37023 default_version_info
->next
= first_v
;
37024 default_version_info
->prev
= NULL
;
37027 default_node
= default_version_info
->this_node
;
37029 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37030 error_at (DECL_SOURCE_LOCATION (default_node
->decl
),
37031 "target_clones attribute needs GLIBC (2.23 and newer) that "
37032 "exports hardware capability bits");
37035 if (targetm
.has_ifunc_p ())
37037 struct cgraph_function_version_info
*it_v
= NULL
;
37038 struct cgraph_node
*dispatcher_node
= NULL
;
37039 struct cgraph_function_version_info
*dispatcher_version_info
= NULL
;
37041 /* Right now, the dispatching is done via ifunc. */
37042 dispatch_decl
= make_dispatcher_decl (default_node
->decl
);
37044 dispatcher_node
= cgraph_node::get_create (dispatch_decl
);
37045 gcc_assert (dispatcher_node
!= NULL
);
37046 dispatcher_node
->dispatcher_function
= 1;
37047 dispatcher_version_info
37048 = dispatcher_node
->insert_new_function_version ();
37049 dispatcher_version_info
->next
= default_version_info
;
37050 dispatcher_node
->definition
= 1;
37052 /* Set the dispatcher for all the versions. */
37053 it_v
= default_version_info
;
37054 while (it_v
!= NULL
)
37056 it_v
->dispatcher_resolver
= dispatch_decl
;
37062 error_at (DECL_SOURCE_LOCATION (default_node
->decl
),
37063 "multiversioning needs ifunc which is not supported "
37068 return dispatch_decl
;
37071 /* Make the resolver function decl to dispatch the versions of a multi-
37072 versioned function, DEFAULT_DECL. Create an empty basic block in the
37073 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37077 make_resolver_func (const tree default_decl
,
37078 const tree dispatch_decl
,
37079 basic_block
*empty_bb
)
37081 /* Make the resolver function static. The resolver function returns
37083 tree decl_name
= clone_function_name (default_decl
, "resolver");
37084 const char *resolver_name
= IDENTIFIER_POINTER (decl_name
);
37085 tree type
= build_function_type_list (ptr_type_node
, NULL_TREE
);
37086 tree decl
= build_fn_decl (resolver_name
, type
);
37087 SET_DECL_ASSEMBLER_NAME (decl
, decl_name
);
37089 DECL_NAME (decl
) = decl_name
;
37090 TREE_USED (decl
) = 1;
37091 DECL_ARTIFICIAL (decl
) = 1;
37092 DECL_IGNORED_P (decl
) = 0;
37093 TREE_PUBLIC (decl
) = 0;
37094 DECL_UNINLINABLE (decl
) = 1;
37096 /* Resolver is not external, body is generated. */
37097 DECL_EXTERNAL (decl
) = 0;
37098 DECL_EXTERNAL (dispatch_decl
) = 0;
37100 DECL_CONTEXT (decl
) = NULL_TREE
;
37101 DECL_INITIAL (decl
) = make_node (BLOCK
);
37102 DECL_STATIC_CONSTRUCTOR (decl
) = 0;
37104 /* Build result decl and add to function_decl. */
37105 tree t
= build_decl (UNKNOWN_LOCATION
, RESULT_DECL
, NULL_TREE
, ptr_type_node
);
37106 DECL_ARTIFICIAL (t
) = 1;
37107 DECL_IGNORED_P (t
) = 1;
37108 DECL_RESULT (decl
) = t
;
37110 gimplify_function_tree (decl
);
37111 push_cfun (DECL_STRUCT_FUNCTION (decl
));
37112 *empty_bb
= init_lowered_empty_function (decl
, false,
37113 profile_count::uninitialized ());
37115 cgraph_node::add_new_function (decl
, true);
37116 symtab
->call_cgraph_insertion_hooks (cgraph_node::get_create (decl
));
37120 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37121 DECL_ATTRIBUTES (dispatch_decl
)
37122 = make_attribute ("ifunc", resolver_name
, DECL_ATTRIBUTES (dispatch_decl
));
37124 cgraph_node::create_same_body_alias (dispatch_decl
, decl
);
/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
   return a pointer to VERSION_DECL if we are running on a machine that
   supports the index CLONE_ISA hardware architecture bits.  This function will
   be called during version dispatch to decide which function version to
   execute.  It returns the basic block at the end, to which more conditions
   can be added.  */
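/* Sketch (illustrative; not emitted literally): for a non-default clone the
   GIMPLE built below corresponds roughly to

       if (__builtin_cpu_supports ("arch_3_00"))
	 return (void *) &version_decl;

   while the default clone gets the unconditional return, so the chain of
   blocks ends with the default version as the fallback.  */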
37137 add_condition_to_bb (tree function_decl
, tree version_decl
,
37138 int clone_isa
, basic_block new_bb
)
37140 push_cfun (DECL_STRUCT_FUNCTION (function_decl
));
37142 gcc_assert (new_bb
!= NULL
);
37143 gimple_seq gseq
= bb_seq (new_bb
);
37146 tree convert_expr
= build1 (CONVERT_EXPR
, ptr_type_node
,
37147 build_fold_addr_expr (version_decl
));
37148 tree result_var
= create_tmp_var (ptr_type_node
);
37149 gimple
*convert_stmt
= gimple_build_assign (result_var
, convert_expr
);
37150 gimple
*return_stmt
= gimple_build_return (result_var
);
37152 if (clone_isa
== CLONE_DEFAULT
)
37154 gimple_seq_add_stmt (&gseq
, convert_stmt
);
37155 gimple_seq_add_stmt (&gseq
, return_stmt
);
37156 set_bb_seq (new_bb
, gseq
);
37157 gimple_set_bb (convert_stmt
, new_bb
);
37158 gimple_set_bb (return_stmt
, new_bb
);
37163 tree bool_zero
= build_int_cst (bool_int_type_node
, 0);
37164 tree cond_var
= create_tmp_var (bool_int_type_node
);
37165 tree predicate_decl
= rs6000_builtin_decls
[(int) RS6000_BUILTIN_CPU_SUPPORTS
];
37166 const char *arg_str
= rs6000_clone_map
[clone_isa
].name
;
37167 tree predicate_arg
= build_string_literal (strlen (arg_str
) + 1, arg_str
);
37168 gimple
*call_cond_stmt
= gimple_build_call (predicate_decl
, 1, predicate_arg
);
37169 gimple_call_set_lhs (call_cond_stmt
, cond_var
);
37171 gimple_set_block (call_cond_stmt
, DECL_INITIAL (function_decl
));
37172 gimple_set_bb (call_cond_stmt
, new_bb
);
37173 gimple_seq_add_stmt (&gseq
, call_cond_stmt
);
37175 gimple
*if_else_stmt
= gimple_build_cond (NE_EXPR
, cond_var
, bool_zero
,
37176 NULL_TREE
, NULL_TREE
);
37177 gimple_set_block (if_else_stmt
, DECL_INITIAL (function_decl
));
37178 gimple_set_bb (if_else_stmt
, new_bb
);
37179 gimple_seq_add_stmt (&gseq
, if_else_stmt
);
37181 gimple_seq_add_stmt (&gseq
, convert_stmt
);
37182 gimple_seq_add_stmt (&gseq
, return_stmt
);
37183 set_bb_seq (new_bb
, gseq
);
37185 basic_block bb1
= new_bb
;
37186 edge e12
= split_block (bb1
, if_else_stmt
);
37187 basic_block bb2
= e12
->dest
;
37188 e12
->flags
&= ~EDGE_FALLTHRU
;
37189 e12
->flags
|= EDGE_TRUE_VALUE
;
37191 edge e23
= split_block (bb2
, return_stmt
);
37192 gimple_set_bb (convert_stmt
, bb2
);
37193 gimple_set_bb (return_stmt
, bb2
);
37195 basic_block bb3
= e23
->dest
;
37196 make_edge (bb1
, bb3
, EDGE_FALSE_VALUE
);
37199 make_edge (bb2
, EXIT_BLOCK_PTR_FOR_FN (cfun
), 0);
37205 /* This function generates the dispatch function for multi-versioned functions.
37206 DISPATCH_DECL is the function which will contain the dispatch logic.
37207 FNDECLS are the function choices for dispatch, and is a tree chain.
37208 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37209 code is generated. */
37212 dispatch_function_versions (tree dispatch_decl
,
37214 basic_block
*empty_bb
)
37218 vec
<tree
> *fndecls
;
37219 tree clones
[CLONE_MAX
];
37221 if (TARGET_DEBUG_TARGET
)
37222 fputs ("dispatch_function_versions, top\n", stderr
);
37224 gcc_assert (dispatch_decl
!= NULL
37225 && fndecls_p
!= NULL
37226 && empty_bb
!= NULL
);
37228 /* fndecls_p is actually a vector. */
37229 fndecls
= static_cast<vec
<tree
> *> (fndecls_p
);
37231 /* At least one more version other than the default. */
37232 gcc_assert (fndecls
->length () >= 2);
37234 /* The first version in the vector is the default decl. */
37235 memset ((void *) clones
, '\0', sizeof (clones
));
37236 clones
[CLONE_DEFAULT
] = (*fndecls
)[0];
37238 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37239 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37240 __builtin_cpu_support ensures that the TOC fields are setup by requiring a
37241 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37242 to insert the code here to do the call. */
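  /* For reference (illustrative): on PowerPC, glibc 2.23 and newer stores the
     AT_HWCAP/AT_HWCAP2 words in the TCB, so the __builtin_cpu_supports calls
     generated below compile down to a load plus a mask test rather than a
     runtime library call, which is why no __builtin_cpu_init is required.  */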
37244 for (ix
= 1; fndecls
->iterate (ix
, &ele
); ++ix
)
37246 int priority
= rs6000_clone_priority (ele
);
37247 if (!clones
[priority
])
37248 clones
[priority
] = ele
;
37251 for (ix
= CLONE_MAX
- 1; ix
>= 0; ix
--)
37254 if (TARGET_DEBUG_TARGET
)
37255 fprintf (stderr
, "dispatch_function_versions, clone %d, %s\n",
37256 ix
, get_decl_name (clones
[ix
]));
37258 *empty_bb
= add_condition_to_bb (dispatch_decl
, clones
[ix
], ix
,
37265 /* Generate the dispatching code body to dispatch multi-versioned function
37266 DECL. The target hook is called to process the "target" attributes and
37267 provide the code to dispatch the right function at run-time. NODE points
37268 to the dispatcher decl whose body will be created. */
37271 rs6000_generate_version_dispatcher_body (void *node_p
)
37274 basic_block empty_bb
;
37275 struct cgraph_node
*node
= (cgraph_node
*) node_p
;
37276 struct cgraph_function_version_info
*ninfo
= node
->function_version ();
37278 if (ninfo
->dispatcher_resolver
)
37279 return ninfo
->dispatcher_resolver
;
37281 /* node is going to be an alias, so remove the finalized bit. */
37282 node
->definition
= false;
37284 /* The first version in the chain corresponds to the default version. */
37285 ninfo
->dispatcher_resolver
= resolver
37286 = make_resolver_func (ninfo
->next
->this_node
->decl
, node
->decl
, &empty_bb
);
37288 if (TARGET_DEBUG_TARGET
)
37289 fprintf (stderr
, "rs6000_get_function_versions_dispatcher, %s\n",
37290 get_decl_name (resolver
));
37292 push_cfun (DECL_STRUCT_FUNCTION (resolver
));
37293 auto_vec
<tree
, 2> fn_ver_vec
;
37295 for (struct cgraph_function_version_info
*vinfo
= ninfo
->next
;
37297 vinfo
= vinfo
->next
)
37299 struct cgraph_node
*version
= vinfo
->this_node
;
37300 /* Check for virtual functions here again, as by this time it should
37301 have been determined if this function needs a vtable index or
37302 not. This happens for methods in derived classes that override
37303 virtual methods in base classes but are not explicitly marked as
37305 if (DECL_VINDEX (version
->decl
))
37306 sorry ("Virtual function multiversioning not supported");
37308 fn_ver_vec
.safe_push (version
->decl
);
37311 dispatch_function_versions (resolver
, &fn_ver_vec
, &empty_bb
);
37312 cgraph_edge::rebuild_edges ();
37318 /* Hook to determine if one function can safely inline another. */
37321 rs6000_can_inline_p (tree caller
, tree callee
)
37324 tree caller_tree
= DECL_FUNCTION_SPECIFIC_TARGET (caller
);
37325 tree callee_tree
= DECL_FUNCTION_SPECIFIC_TARGET (callee
);
37327 /* If callee has no option attributes, then it is ok to inline. */
37331 /* If caller has no option attributes, but callee does then it is not ok to
37333 else if (!caller_tree
)
37338 struct cl_target_option
*caller_opts
= TREE_TARGET_OPTION (caller_tree
);
37339 struct cl_target_option
*callee_opts
= TREE_TARGET_OPTION (callee_tree
);
      /* Callee's options should be a subset of the caller's, i.e. a vsx
	 function can inline an altivec function but a non-vsx function can't
	 inline a vsx function.  */
->x_rs6000_isa_flags
& callee_opts
->x_rs6000_isa_flags
)
37345 == callee_opts
->x_rs6000_isa_flags
)
37349 if (TARGET_DEBUG_TARGET
)
37350 fprintf (stderr
, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37351 get_decl_name (caller
), get_decl_name (callee
),
37352 (ret
? "can" : "cannot"));
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (machine_mode mode,
			    bool offsettable_p,
			    bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = reload_completed;

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
	  && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
37382 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37383 to such a form to deal with memory reference instructions like STFIWX that
37384 only take reg+reg addressing. */
37387 rs6000_address_for_fpconvert (rtx x
)
37391 gcc_assert (MEM_P (x
));
37392 addr
= XEXP (x
, 0);
37393 if (! legitimate_indirect_address_p (addr
, reload_completed
)
37394 && ! legitimate_indexed_address_p (addr
, reload_completed
))
37396 if (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
37398 rtx reg
= XEXP (addr
, 0);
37399 HOST_WIDE_INT size
= GET_MODE_SIZE (GET_MODE (x
));
37400 rtx size_rtx
= GEN_INT ((GET_CODE (addr
) == PRE_DEC
) ? -size
: size
);
37401 gcc_assert (REG_P (reg
));
37402 emit_insn (gen_add3_insn (reg
, reg
, size_rtx
));
37405 else if (GET_CODE (addr
) == PRE_MODIFY
)
37407 rtx reg
= XEXP (addr
, 0);
37408 rtx expr
= XEXP (addr
, 1);
37409 gcc_assert (REG_P (reg
));
37410 gcc_assert (GET_CODE (expr
) == PLUS
);
37411 emit_insn (gen_add3_insn (reg
, XEXP (expr
, 0), XEXP (expr
, 1)));
37415 x
= replace_equiv_address (x
, copy_addr_to_reg (addr
));
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);

      if (!legitimate_indexed_address_p (addr, reload_completed)
	  && !legitimate_indirect_address_p (addr, reload_completed))
	addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable; most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
/* Return TRUE iff the sequence ending in LAST sets the static chain.  */

static bool
chain_already_loaded (rtx_insn *last)
{
  for (; last != NULL; last = PREV_INSN (last))
    {
      if (NONJUMP_INSN_P (last))
	{
	  rtx patt = PATTERN (last);

	  if (GET_CODE (patt) == SET)
	    {
	      rtx lhs = XEXP (patt, 0);

	      if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
		return true;
	    }
	}
    }
  return false;
}
37486 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37489 rs6000_call_aix (rtx value
, rtx func_desc
, rtx flag
, rtx cookie
)
37491 const bool direct_call_p
37492 = GET_CODE (func_desc
) == SYMBOL_REF
&& SYMBOL_REF_FUNCTION_P (func_desc
);
37493 rtx toc_reg
= gen_rtx_REG (Pmode
, TOC_REGNUM
);
37494 rtx toc_load
= NULL_RTX
;
37495 rtx toc_restore
= NULL_RTX
;
37497 rtx abi_reg
= NULL_RTX
;
37502 /* Handle longcall attributes. */
37503 if (INTVAL (cookie
) & CALL_LONG
)
37504 func_desc
= rs6000_longcall_ref (func_desc
);
37506 /* Handle indirect calls. */
37507 if (GET_CODE (func_desc
) != SYMBOL_REF
37508 || (DEFAULT_ABI
== ABI_AIX
&& !SYMBOL_REF_FUNCTION_P (func_desc
)))
37510 /* Save the TOC into its reserved slot before the call,
37511 and prepare to restore it after the call. */
37512 rtx stack_ptr
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
37513 rtx stack_toc_offset
= GEN_INT (RS6000_TOC_SAVE_SLOT
);
37514 rtx stack_toc_mem
= gen_frame_mem (Pmode
,
37515 gen_rtx_PLUS (Pmode
, stack_ptr
,
37516 stack_toc_offset
));
37517 rtx stack_toc_unspec
= gen_rtx_UNSPEC (Pmode
,
37518 gen_rtvec (1, stack_toc_offset
),
37520 toc_restore
= gen_rtx_SET (toc_reg
, stack_toc_unspec
);
37522 /* Can we optimize saving the TOC in the prologue or
37523 do we need to do it at every call? */
37524 if (TARGET_SAVE_TOC_INDIRECT
&& !cfun
->calls_alloca
)
37525 cfun
->machine
->save_toc_in_prologue
= true;
37528 MEM_VOLATILE_P (stack_toc_mem
) = 1;
37529 emit_move_insn (stack_toc_mem
, toc_reg
);
37532 if (DEFAULT_ABI
== ABI_ELFv2
)
37534 /* A function pointer in the ELFv2 ABI is just a plain address, but
37535 the ABI requires it to be loaded into r12 before the call. */
37536 func_addr
= gen_rtx_REG (Pmode
, 12);
37537 emit_move_insn (func_addr
, func_desc
);
37538 abi_reg
= func_addr
;
37542 /* A function pointer under AIX is a pointer to a data area whose
37543 first word contains the actual address of the function, whose
37544 second word contains a pointer to its TOC, and whose third word
37545 contains a value to place in the static chain register (r11).
37546 Note that if we load the static chain, our "trampoline" need
37547 not have any executable code. */
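	  /* Illustration (not a type the compiler actually uses): under the
	     AIX ABI a function pointer effectively points at

		 struct func_descriptor {
		   void *entry_point;     -- word 0: code address
		   void *toc;             -- word 1: callee's TOC pointer
		   void *static_chain;    -- word 2: environment pointer
		 };

	     which is why the code below loads the words at offsets 0, 1 and 2
	     times GET_MODE_SIZE (Pmode) from FUNC_DESC.  */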
37549 /* Load up address of the actual function. */
37550 func_desc
= force_reg (Pmode
, func_desc
);
37551 func_addr
= gen_reg_rtx (Pmode
);
37552 emit_move_insn (func_addr
, gen_rtx_MEM (Pmode
, func_desc
));
37554 /* Prepare to load the TOC of the called function. Note that the
37555 TOC load must happen immediately before the actual call so
37556 that unwinding the TOC registers works correctly. See the
37557 comment in frob_update_context. */
37558 rtx func_toc_offset
= GEN_INT (GET_MODE_SIZE (Pmode
));
37559 rtx func_toc_mem
= gen_rtx_MEM (Pmode
,
37560 gen_rtx_PLUS (Pmode
, func_desc
,
37562 toc_load
= gen_rtx_USE (VOIDmode
, func_toc_mem
);
37564 /* If we have a static chain, load it up. But, if the call was
37565 originally direct, the 3rd word has not been written since no
37566 trampoline has been built, so we ought not to load it, lest we
37567 override a static chain value. */
37569 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37570 && !chain_already_loaded (get_current_sequence ()->next
->last
))
37572 rtx sc_reg
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
37573 rtx func_sc_offset
= GEN_INT (2 * GET_MODE_SIZE (Pmode
));
37574 rtx func_sc_mem
= gen_rtx_MEM (Pmode
,
37575 gen_rtx_PLUS (Pmode
, func_desc
,
37577 emit_move_insn (sc_reg
, func_sc_mem
);
37584 /* Direct calls use the TOC: for local calls, the callee will
37585 assume the TOC register is set; for non-local calls, the
37586 PLT stub needs the TOC register. */
37588 func_addr
= func_desc
;
37591 /* Create the call. */
37592 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_addr
), flag
);
37593 if (value
!= NULL_RTX
)
37594 call
[0] = gen_rtx_SET (value
, call
[0]);
37598 call
[n_call
++] = toc_load
;
37600 call
[n_call
++] = toc_restore
;
37602 call
[n_call
++] = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
37604 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (n_call
, call
));
37605 insn
= emit_call_insn (insn
);
37607 /* Mention all registers defined by the ABI to hold information
37608 as uses in CALL_INSN_FUNCTION_USAGE. */
37610 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), abi_reg
);
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif
/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
37665 /* This function emits the simple thunk routine that is used to preserve
37666 the link stack on the 476 cpu. */
37668 static void rs6000_code_end (void) ATTRIBUTE_UNUSED
;
37670 rs6000_code_end (void)
37675 if (!TARGET_LINK_STACK
)
37678 get_ppc476_thunk_name (name
);
37680 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
, get_identifier (name
),
37681 build_function_type_list (void_type_node
, NULL_TREE
));
37682 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
37683 NULL_TREE
, void_type_node
);
37684 TREE_PUBLIC (decl
) = 1;
37685 TREE_STATIC (decl
) = 1;
37688 if (USE_HIDDEN_LINKONCE
&& !TARGET_XCOFF
)
37690 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
37691 targetm
.asm_out
.unique_section (decl
, 0);
37692 switch_to_section (get_named_section (decl
, NULL
, 0));
37693 DECL_WEAK (decl
) = 1;
37694 ASM_WEAKEN_DECL (asm_out_file
, decl
, name
, 0);
37695 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
37696 targetm
.asm_out
.assemble_visibility (decl
, VISIBILITY_HIDDEN
);
37697 ASM_DECLARE_FUNCTION_NAME (asm_out_file
, name
, decl
);
37702 switch_to_section (text_section
);
37703 ASM_OUTPUT_LABEL (asm_out_file
, name
);
37706 DECL_INITIAL (decl
) = make_node (BLOCK
);
37707 current_function_decl
= decl
;
37708 allocate_struct_function (decl
, false);
37709 init_function_start (decl
);
37710 first_function_block_is_cold
= false;
37711 /* Make sure unwind info is emitted for the thunk if needed. */
37712 final_start_function (emit_barrier (), asm_out_file
, 1);
37714 fputs ("\tblr\n", asm_out_file
);
37716 final_end_function ();
37717 init_insn_lengths ();
37718 free_after_compilation (cfun
);
37720 current_function_decl
= NULL
;
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
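/* Example (illustrative): an "eqv"-style operation, ~(a ^ b), reaches this
   helper as CODE == XOR with COMPLEMENT_FINAL_P set, while "andc", a & ~b,
   arrives as CODE == AND with COMPLEMENT_OP2_P set; the operand swap below
   keeps the RTL canonical by always putting a complemented operand first.  */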
37751 rs6000_split_logical_inner (rtx dest
,
37754 enum rtx_code code
,
37756 bool complement_final_p
,
37757 bool complement_op1_p
,
37758 bool complement_op2_p
)
37762 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37763 if (op2
&& GET_CODE (op2
) == CONST_INT
37764 && (mode
== SImode
|| (mode
== DImode
&& TARGET_POWERPC64
))
37765 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
37767 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
37768 HOST_WIDE_INT value
= INTVAL (op2
) & mask
;
37770 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37775 emit_insn (gen_rtx_SET (dest
, const0_rtx
));
37779 else if (value
== mask
)
37781 if (!rtx_equal_p (dest
, op1
))
37782 emit_insn (gen_rtx_SET (dest
, op1
));
      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
37789 else if (code
== IOR
|| code
== XOR
)
37793 if (!rtx_equal_p (dest
, op1
))
37794 emit_insn (gen_rtx_SET (dest
, op1
));
37800 if (code
== AND
&& mode
== SImode
37801 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
37803 emit_insn (gen_andsi3 (dest
, op1
, op2
));
37807 if (complement_op1_p
)
37808 op1
= gen_rtx_NOT (mode
, op1
);
37810 if (complement_op2_p
)
37811 op2
= gen_rtx_NOT (mode
, op2
);
37813 /* For canonical RTL, if only one arm is inverted it is the first. */
37814 if (!complement_op1_p
&& complement_op2_p
)
37815 std::swap (op1
, op2
);
37817 bool_rtx
= ((code
== NOT
)
37818 ? gen_rtx_NOT (mode
, op1
)
37819 : gen_rtx_fmt_ee (code
, mode
, op1
, op2
));
37821 if (complement_final_p
)
37822 bool_rtx
= gen_rtx_NOT (mode
, bool_rtx
);
37824 emit_insn (gen_rtx_SET (dest
, bool_rtx
));
/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
   CLOBBER_REG is either NULL or a scratch register of type CC to allow
   formation of the AND instructions.  */
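/* Worked example (illustrative only): on a 32-bit target, a DImode
   "x & 0x12345678" splits the 64-bit constant into a high word of 0 and a low
   word of 0x12345678.  The high half becomes an AND with 0, which
   rs6000_split_logical_inner turns into a clear of the high GPR, and the low
   half, since 0x12345678 is not a valid andi./andis. immediate, is first
   forced into a register and then ANDed normally.  */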
37841 rs6000_split_logical_di (rtx operands
[3],
37842 enum rtx_code code
,
37843 bool complement_final_p
,
37844 bool complement_op1_p
,
37845 bool complement_op2_p
)
37847 const HOST_WIDE_INT lower_32bits
= HOST_WIDE_INT_C(0xffffffff);
37848 const HOST_WIDE_INT upper_32bits
= ~ lower_32bits
;
37849 const HOST_WIDE_INT sign_bit
= HOST_WIDE_INT_C(0x80000000);
37850 enum hi_lo
{ hi
= 0, lo
= 1 };
37851 rtx op0_hi_lo
[2], op1_hi_lo
[2], op2_hi_lo
[2];
37854 op0_hi_lo
[hi
] = gen_highpart (SImode
, operands
[0]);
37855 op1_hi_lo
[hi
] = gen_highpart (SImode
, operands
[1]);
37856 op0_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[0]);
37857 op1_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[1]);
37860 op2_hi_lo
[hi
] = op2_hi_lo
[lo
] = NULL_RTX
;
37863 if (GET_CODE (operands
[2]) != CONST_INT
)
37865 op2_hi_lo
[hi
] = gen_highpart_mode (SImode
, DImode
, operands
[2]);
37866 op2_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[2]);
37870 HOST_WIDE_INT value
= INTVAL (operands
[2]);
37871 HOST_WIDE_INT value_hi_lo
[2];
37873 gcc_assert (!complement_final_p
);
37874 gcc_assert (!complement_op1_p
);
37875 gcc_assert (!complement_op2_p
);
37877 value_hi_lo
[hi
] = value
>> 32;
37878 value_hi_lo
[lo
] = value
& lower_32bits
;
37880 for (i
= 0; i
< 2; i
++)
37882 HOST_WIDE_INT sub_value
= value_hi_lo
[i
];
37884 if (sub_value
& sign_bit
)
37885 sub_value
|= upper_32bits
;
37887 op2_hi_lo
[i
] = GEN_INT (sub_value
);
37889 /* If this is an AND instruction, check to see if we need to load
37890 the value in a register. */
37891 if (code
== AND
&& sub_value
!= -1 && sub_value
!= 0
37892 && !and_operand (op2_hi_lo
[i
], SImode
))
37893 op2_hi_lo
[i
] = force_reg (SImode
, op2_hi_lo
[i
]);
37898 for (i
= 0; i
< 2; i
++)
37900 /* Split large IOR/XOR operations. */
37901 if ((code
== IOR
|| code
== XOR
)
37902 && GET_CODE (op2_hi_lo
[i
]) == CONST_INT
37903 && !complement_final_p
37904 && !complement_op1_p
37905 && !complement_op2_p
37906 && !logical_const_operand (op2_hi_lo
[i
], SImode
))
37908 HOST_WIDE_INT value
= INTVAL (op2_hi_lo
[i
]);
37909 HOST_WIDE_INT hi_16bits
= value
& HOST_WIDE_INT_C(0xffff0000);
37910 HOST_WIDE_INT lo_16bits
= value
& HOST_WIDE_INT_C(0x0000ffff);
37911 rtx tmp
= gen_reg_rtx (SImode
);
37913 /* Make sure the constant is sign extended. */
37914 if ((hi_16bits
& sign_bit
) != 0)
37915 hi_16bits
|= upper_32bits
;
37917 rs6000_split_logical_inner (tmp
, op1_hi_lo
[i
], GEN_INT (hi_16bits
),
37918 code
, SImode
, false, false, false);
37920 rs6000_split_logical_inner (op0_hi_lo
[i
], tmp
, GEN_INT (lo_16bits
),
37921 code
, SImode
, false, false, false);
37924 rs6000_split_logical_inner (op0_hi_lo
[i
], op1_hi_lo
[i
], op2_hi_lo
[i
],
37925 code
, SImode
, complement_final_p
,
37926 complement_op1_p
, complement_op2_p
);
37932 /* Split the insns that make up boolean operations operating on multiple GPR
37933 registers. The boolean MD patterns ensure that the inputs either are
37934 exactly the same as the output registers, or there is no overlap.
37936 OPERANDS is an array containing the destination and two input operands.
37937 CODE is the base operation (AND, IOR, XOR, NOT).
37938 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37939 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37940 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37943 rs6000_split_logical (rtx operands
[3],
37944 enum rtx_code code
,
37945 bool complement_final_p
,
37946 bool complement_op1_p
,
37947 bool complement_op2_p
)
37949 machine_mode mode
= GET_MODE (operands
[0]);
37950 machine_mode sub_mode
;
37952 int sub_size
, regno0
, regno1
, nregs
, i
;
37954 /* If this is DImode, use the specialized version that can run before
37955 register allocation. */
37956 if (mode
== DImode
&& !TARGET_POWERPC64
)
37958 rs6000_split_logical_di (operands
, code
, complement_final_p
,
37959 complement_op1_p
, complement_op2_p
);
37965 op2
= (code
== NOT
) ? NULL_RTX
: operands
[2];
37966 sub_mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
37967 sub_size
= GET_MODE_SIZE (sub_mode
);
37968 regno0
= REGNO (op0
);
37969 regno1
= REGNO (op1
);
37971 gcc_assert (reload_completed
);
37972 gcc_assert (IN_RANGE (regno0
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
37973 gcc_assert (IN_RANGE (regno1
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
37975 nregs
= rs6000_hard_regno_nregs
[(int)mode
][regno0
];
37976 gcc_assert (nregs
> 1);
37978 if (op2
&& REG_P (op2
))
37979 gcc_assert (IN_RANGE (REGNO (op2
), FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
37981 for (i
= 0; i
< nregs
; i
++)
37983 int offset
= i
* sub_size
;
37984 rtx sub_op0
= simplify_subreg (sub_mode
, op0
, mode
, offset
);
37985 rtx sub_op1
= simplify_subreg (sub_mode
, op1
, mode
, offset
);
37986 rtx sub_op2
= ((code
== NOT
)
37988 : simplify_subreg (sub_mode
, op2
, mode
, offset
));
37990 rs6000_split_logical_inner (sub_op0
, sub_op1
, sub_op2
, code
, sub_mode
,
37991 complement_final_p
, complement_op1_p
,
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together on
   a power8.  */
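/* Illustration (the assembly is shown only as an example of the pattern being
   matched): the peephole looks for a pair such as

       addis 9,2,sym@toc@ha
       lwz   9,sym@toc@l(9)

   where the addis result is either the loaded register itself or is dead after
   the load, so the two insns can be emitted adjacently and fused by the
   power8 front end.  */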
bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;
38012 /* Validate arguments. */
38013 if (!base_reg_operand (addis_reg
, GET_MODE (addis_reg
)))
38016 if (!base_reg_operand (target
, GET_MODE (target
)))
38019 if (!fusion_gpr_addis (addis_value
, GET_MODE (addis_value
)))
38022 /* Allow sign/zero extension. */
38023 if (GET_CODE (mem
) == ZERO_EXTEND
38024 || (GET_CODE (mem
) == SIGN_EXTEND
&& TARGET_P8_FUSION_SIGN
))
38025 mem
= XEXP (mem
, 0);
38030 if (!fusion_gpr_mem_load (mem
, GET_MODE (mem
)))
38033 addr
= XEXP (mem
, 0); /* either PLUS or LO_SUM. */
38034 if (GET_CODE (addr
) != PLUS
&& GET_CODE (addr
) != LO_SUM
)
38037 /* Validate that the register used to load the high value is either the
38038 register being loaded, or we can safely replace its use.
38040 This function is only called from the peephole2 pass and we assume that
38041 there are 2 instructions in the peephole (addis and load), so we want to
38042 check if the target register was not used in the memory address and the
38043 register to hold the addis result is dead after the peephole. */
38044 if (REGNO (addis_reg
) != REGNO (target
))
38046 if (reg_mentioned_p (target
, mem
))
38049 if (!peep2_reg_dead_p (2, addis_reg
))
38052 /* If the target register being loaded is the stack pointer, we must
38053 avoid loading any other value into it, even temporarily. */
38054 if (REG_P (target
) && REGNO (target
) == STACK_POINTER_REGNUM
)
38058 base_reg
= XEXP (addr
, 0);
38059 return REGNO (addis_reg
) == REGNO (base_reg
);
38062 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38063 sequence. We adjust the addis register to use the target register. If the
38064 load sign extends, we adjust the code to do the zero extending load, and an
38065 explicit sign extension later since the fusion only covers zero extending
38069 operands[0] register set with addis (to be replaced with target)
38070 operands[1] value set via addis
38071 operands[2] target register being loaded
38072 operands[3] D-form memory reference using operands[0]. */
38075 expand_fusion_gpr_load (rtx
*operands
)
38077 rtx addis_value
= operands
[1];
38078 rtx target
= operands
[2];
38079 rtx orig_mem
= operands
[3];
38080 rtx new_addr
, new_mem
, orig_addr
, offset
;
38081 enum rtx_code plus_or_lo_sum
;
38082 machine_mode target_mode
= GET_MODE (target
);
38083 machine_mode extend_mode
= target_mode
;
38084 machine_mode ptr_mode
= Pmode
;
38085 enum rtx_code extend
= UNKNOWN
;
38087 if (GET_CODE (orig_mem
) == ZERO_EXTEND
38088 || (TARGET_P8_FUSION_SIGN
&& GET_CODE (orig_mem
) == SIGN_EXTEND
))
38090 extend
= GET_CODE (orig_mem
);
38091 orig_mem
= XEXP (orig_mem
, 0);
38092 target_mode
= GET_MODE (orig_mem
);
38095 gcc_assert (MEM_P (orig_mem
));
38097 orig_addr
= XEXP (orig_mem
, 0);
38098 plus_or_lo_sum
= GET_CODE (orig_addr
);
38099 gcc_assert (plus_or_lo_sum
== PLUS
|| plus_or_lo_sum
== LO_SUM
);
38101 offset
= XEXP (orig_addr
, 1);
38102 new_addr
= gen_rtx_fmt_ee (plus_or_lo_sum
, ptr_mode
, addis_value
, offset
);
38103 new_mem
= replace_equiv_address_nv (orig_mem
, new_addr
, false);
38105 if (extend
!= UNKNOWN
)
38106 new_mem
= gen_rtx_fmt_e (ZERO_EXTEND
, extend_mode
, new_mem
);
38108 new_mem
= gen_rtx_UNSPEC (extend_mode
, gen_rtvec (1, new_mem
),
38109 UNSPEC_FUSION_GPR
);
38110 emit_insn (gen_rtx_SET (target
, new_mem
));
38112 if (extend
== SIGN_EXTEND
)
38114 int sub_off
= ((BYTES_BIG_ENDIAN
)
38115 ? GET_MODE_SIZE (extend_mode
) - GET_MODE_SIZE (target_mode
)
38118 = simplify_subreg (target_mode
, target
, extend_mode
, sub_off
);
38120 emit_insn (gen_rtx_SET (target
,
38121 gen_rtx_SIGN_EXTEND (extend_mode
, sign_reg
)));
38127 /* Emit the addis instruction that will be part of a fused instruction
38131 emit_fusion_addis (rtx target
, rtx addis_value
, const char *comment
,
38132 const char *mode_name
)
38135 char insn_template
[80];
38136 const char *addis_str
= NULL
;
38137 const char *comment_str
= ASM_COMMENT_START
;
38139 if (*comment_str
== ' ')
38142 /* Emit the addis instruction. */
38143 fuse_ops
[0] = target
;
38144 if (satisfies_constraint_L (addis_value
))
38146 fuse_ops
[1] = addis_value
;
38147 addis_str
= "lis %0,%v1";
38150 else if (GET_CODE (addis_value
) == PLUS
)
38152 rtx op0
= XEXP (addis_value
, 0);
38153 rtx op1
= XEXP (addis_value
, 1);
38155 if (REG_P (op0
) && CONST_INT_P (op1
)
38156 && satisfies_constraint_L (op1
))
38160 addis_str
= "addis %0,%1,%v2";
38164 else if (GET_CODE (addis_value
) == HIGH
)
38166 rtx value
= XEXP (addis_value
, 0);
38167 if (GET_CODE (value
) == UNSPEC
&& XINT (value
, 1) == UNSPEC_TOCREL
)
38169 fuse_ops
[1] = XVECEXP (value
, 0, 0); /* symbol ref. */
38170 fuse_ops
[2] = XVECEXP (value
, 0, 1); /* TOC register. */
38172 addis_str
= "addis %0,%2,%1@toc@ha";
38174 else if (TARGET_XCOFF
)
38175 addis_str
= "addis %0,%1@u(%2)";
38178 gcc_unreachable ();
38181 else if (GET_CODE (value
) == PLUS
)
38183 rtx op0
= XEXP (value
, 0);
38184 rtx op1
= XEXP (value
, 1);
38186 if (GET_CODE (op0
) == UNSPEC
38187 && XINT (op0
, 1) == UNSPEC_TOCREL
38188 && CONST_INT_P (op1
))
38190 fuse_ops
[1] = XVECEXP (op0
, 0, 0); /* symbol ref. */
38191 fuse_ops
[2] = XVECEXP (op0
, 0, 1); /* TOC register. */
38194 addis_str
= "addis %0,%2,%1+%3@toc@ha";
38196 else if (TARGET_XCOFF
)
38197 addis_str
= "addis %0,%1+%3@u(%2)";
38200 gcc_unreachable ();
38204 else if (satisfies_constraint_L (value
))
38206 fuse_ops
[1] = value
;
38207 addis_str
= "lis %0,%v1";
38210 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (value
))
38212 fuse_ops
[1] = value
;
38213 addis_str
= "lis %0,%1@ha";
38218 fatal_insn ("Could not generate addis value for fusion", addis_value
);
38220 sprintf (insn_template
, "%s\t\t%s %s, type %s", addis_str
, comment_str
,
38221 comment
, mode_name
);
38222 output_asm_insn (insn_template
, fuse_ops
);
38225 /* Emit a D-form load or store instruction that is the second instruction
38226 of a fusion sequence. */
38229 emit_fusion_load_store (rtx load_store_reg
, rtx addis_reg
, rtx offset
,
38230 const char *insn_str
)
38233 char insn_template
[80];
38235 fuse_ops
[0] = load_store_reg
;
38236 fuse_ops
[1] = addis_reg
;
38238 if (CONST_INT_P (offset
) && satisfies_constraint_I (offset
))
38240 sprintf (insn_template
, "%s %%0,%%2(%%1)", insn_str
);
38241 fuse_ops
[2] = offset
;
38242 output_asm_insn (insn_template
, fuse_ops
);
38245 else if (GET_CODE (offset
) == UNSPEC
38246 && XINT (offset
, 1) == UNSPEC_TOCREL
)
38249 sprintf (insn_template
, "%s %%0,%%2@toc@l(%%1)", insn_str
);
38251 else if (TARGET_XCOFF
)
38252 sprintf (insn_template
, "%s %%0,%%2@l(%%1)", insn_str
);
38255 gcc_unreachable ();
38257 fuse_ops
[2] = XVECEXP (offset
, 0, 0);
38258 output_asm_insn (insn_template
, fuse_ops
);
38261 else if (GET_CODE (offset
) == PLUS
38262 && GET_CODE (XEXP (offset
, 0)) == UNSPEC
38263 && XINT (XEXP (offset
, 0), 1) == UNSPEC_TOCREL
38264 && CONST_INT_P (XEXP (offset
, 1)))
38266 rtx tocrel_unspec
= XEXP (offset
, 0);
38268 sprintf (insn_template
, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str
);
38270 else if (TARGET_XCOFF
)
38271 sprintf (insn_template
, "%s %%0,%%2+%%3@l(%%1)", insn_str
);
38274 gcc_unreachable ();
38276 fuse_ops
[2] = XVECEXP (tocrel_unspec
, 0, 0);
38277 fuse_ops
[3] = XEXP (offset
, 1);
38278 output_asm_insn (insn_template
, fuse_ops
);
38281 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (offset
))
38283 sprintf (insn_template
, "%s %%0,%%2@l(%%1)", insn_str
);
38285 fuse_ops
[2] = offset
;
38286 output_asm_insn (insn_template
, fuse_ops
);
38290 fatal_insn ("Unable to generate load/store offset for fusion", offset
);
/* Wrap a TOC address that can be fused to indicate that special fusion
   processing is needed.  */

static rtx
fusion_wrap_memory_address (rtx old_mem)
{
  rtx old_addr = XEXP (old_mem, 0);
  rtvec v = gen_rtvec (1, old_addr);
  rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
  return replace_equiv_address_nv (old_mem, new_addr, false);
}
/* Given an address, convert it into the addis and load offset parts.
   Addresses created during the peephole2 process look like:
	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(...)] UNSPEC_TOCREL))

   Addresses created via toc fusion look like:
	(unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
    {
      lo = XVECEXP (addr, 0, 0);
      hi = gen_rtx_HIGH (Pmode, lo);
    }
  else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
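
/* Example (illustration only): for the peephole2 form
	(lo_sum (high (symbol_ref X)) (symbol_ref X))
   *p_hi receives the HIGH part and *p_lo the low part; for the wrapped form
	(unspec [(addr)] UNSPEC_FUSION_ADDIS)
   *p_lo receives the inner address and *p_hi a freshly built (high <addr>).  */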
/* Return a string to fuse an addis instruction with a gpr load into the same
   register that the addis set up.  The address that is used is the logical
   address that was formed during peephole2:
	(lo_sum (high) (low-part))

   Or the address is the TOC address that is wrapped before register
   allocation:
	(unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)

   The code is complicated, so we call output_asm_insn directly, and just
   return "" to the caller.  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  const char *mode_name = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case QImode:
      mode_name = "char";
      load_str = "lbz";
      break;

    case HImode:
      mode_name = "short";
      load_str = "lhz";
      break;

    case SImode:
    case SFmode:
      mode_name = (mode == SFmode) ? "float" : "int";
      load_str = "lwz";
      break;

    case DImode:
    case DFmode:
      gcc_assert (TARGET_POWERPC64);
      mode_name = (mode == DFmode) ? "double" : "long";
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (target, target, load_offset, load_str);

  return "";
}
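
/* Illustration (not from the original code; register number and symbol are
   invented): for an SImode TOC reference the two calls above typically print
   the power8-style fused pair

	addis 9,2,sym@toc@ha		# gpr load fusion, type int
	lwz 9,sym@toc@l(9)

   where the addis result and the loaded value deliberately share one
   register, as this form of fusion requires.  */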
/* Return true if the peephole2 can combine a load/store involving a
   combination of an addis instruction and the memory operation.  This was
   added to the ISA 3.0 (power9) hardware.  */

bool
fusion_p9_p (rtx addis_reg,	/* register set via addis.  */
             rtx addis_value,	/* addis value.  */
             rtx dest,		/* destination (memory or register).  */
             rtx src)		/* source (register or memory).  */
{
  rtx addr, mem, offset;
  machine_mode mode = GET_MODE (src);

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Ignore extend operations that are part of the load.  */
  if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
    src = XEXP (src, 0);

  /* Test for memory<-register or register<-memory.  */
  if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
    {
      if (!MEM_P (dest))
        return false;

      mem = dest;
    }

  else if (MEM_P (src))
    {
      if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
        return false;

      mem = src;
    }

  else
    return false;

  addr = XEXP (mem, 0);		/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) == PLUS)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
        return false;

      return satisfies_constraint_I (XEXP (addr, 1));
    }

  else if (GET_CODE (addr) == LO_SUM)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
        return false;

      offset = XEXP (addr, 1);
      if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
        return small_toc_ref (offset, GET_MODE (offset));

      else if (TARGET_ELF && !TARGET_POWERPC64)
        return CONSTANT_P (offset);
    }

  return false;
}
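
/* In other words (summary added for illustration): the peephole only fuses
   when the memory address is either "addis_reg + 16-bit constant" (PLUS) or
   a LO_SUM of addis_reg with a small TOC reference (any constant on 32-bit
   ELF); every other address shape keeps the addis and the memory access as
   separate insns.  */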
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion load sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same
   as the base register.  At the moment, we only do this if the register set
   with addis is dead.  */

void
expand_fusion_p9_load (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
                            UNSPEC_FUSION_P9);

  set = gen_rtx_SET (target, new_mem);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);
}
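
/* Illustration (not from the original code): after this expansion, a matched
   "addis tmp / load target" pair becomes a single insn of the shape

	(parallel [(set (reg target)
			(unspec [(mem (lo_sum (addis-value) (offset)))] ...))
		   (clobber (reg tmp))])

   so later passes can see that the temporary base register is overwritten by
   the fused sequence.  */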
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion store sequence.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target D-form memory being stored to
	operands[3]	register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same
   as the base register.  At the moment, we only do this if the register set
   with addis is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
                            UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);
}
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "" to the caller.  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
        load_string = "lfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
        load_string = "lxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case QImode:
          load_string = "lbz";
          break;

        case HImode:
          load_string = "lhz";
          break;

        case SImode:
        case SFmode:
          load_string = "lwz";
          break;

        case DImode:
        case DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          load_string = "ld";
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
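
/* Illustration (not from the original code; register numbers and symbol are
   invented): for a DFmode load into an FPR the two calls above typically
   print

	addis 11,2,sym@toc@ha		# power9 load fusion, type DF
	lfd 1,sym@toc@l(11)

   and, unlike the power8 GPR form, the base register (tmp_reg) and the
   destination register are allowed to differ.  */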
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "" to the caller.  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        store_string = "stfs";
      else if (mode == DFmode)
        store_string = "stfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
        store_string = "stxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case QImode:
          store_string = "stb";
          break;

        case HImode:
          store_string = "sth";
          break;

        case SImode:
        case SFmode:
          store_string = "stw";
          break;

        case DImode:
        case DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          store_string = "std";
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
        {
          atomic_hold_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feholdexcept"),
                          build_function_type_list (void_type_node,
                                                    double_ptr_type_node,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_hold_decl) = 1;
          DECL_EXTERNAL (atomic_hold_decl) = 1;
        }

      if (atomic_clear_decl == NULL_TREE)
        {
          atomic_clear_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feclearexcept"),
                          build_function_type_list (void_type_node,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_clear_decl) = 1;
          DECL_EXTERNAL (atomic_clear_decl) = 1;
        }

      tree const_double = build_qualified_type (double_type_node,
                                                TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
        {
          atomic_update_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feupdateenv"),
                          build_function_type_list (void_type_node,
                                                    const_double_ptr,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_update_decl) = 1;
          DECL_EXTERNAL (atomic_update_decl) = 1;
        }

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
                                 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                              build_int_cst (uint64_type_node,
                                             hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
                                     build_int_cst (unsigned_type_node, 0xff),
                                     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
                                    fenv_clean_llu,
                                    build_int_cst (uint64_type_node,
                                                   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
                                      build_int_cst (unsigned_type_node, 0xff),
                                      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
                                (*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
                             build_int_cst (uint64_type_node,
                                            update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                             build_int_cst (uint64_type_node,
                                            new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
                              old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
                                       build_int_cst (unsigned_type_node, 0xff),
                                       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
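
/* Usage sketch (illustration only, not from the original code): the three
   trees built above are spliced by the middle end around an atomic
   floating-point compound assignment, roughly

	hold;                          -- save the FP environment, mask traps
	do { tmp = *addr <op> val;     -- may raise FP exceptions
	     if (compare_exchange (addr, tmp)) break;
	     clear; }                  -- failed attempt: discard its exceptions
	while (1);
	update;                        -- merge new exceptions, restore env

   so exceptions raised by failed attempts are never observable.  */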
void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
     vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
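
/* Summary added for illustration (derived from the element-order comment
   above, not from the original code): conceptually the routine computes

	dst = { (float) src1[0], (float) src1[1],
		(float) src2[0], (float) src2[1] }

   using xvcvsxdsp/xvcvuxdsp for the doubleword-to-float conversions and
   vmrgew to interleave the two converted halves.  */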
void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
                               rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
                          optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
              && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"